xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision aeb2ffde14d914a2adf90754989f139d4c934d4c)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework(QDF) network buffer management APIs
23  */
24 
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/version.h>
28 #include <linux/skbuff.h>
29 #include <linux/module.h>
30 #include <linux/proc_fs.h>
31 #include <qdf_atomic.h>
32 #include <qdf_debugfs.h>
33 #include <qdf_lock.h>
34 #include <qdf_mem.h>
35 #include <qdf_module.h>
36 #include <qdf_nbuf.h>
37 #include <qdf_status.h>
38 #include "qdf_str.h"
39 #include <qdf_trace.h>
40 #include "qdf_tracker.h"
41 #include <qdf_types.h>
42 #include <net/ieee80211_radiotap.h>
43 #include <pld_common.h>
44 
45 #if defined(FEATURE_TSO)
46 #include <net/ipv6.h>
47 #include <linux/ipv6.h>
48 #include <linux/tcp.h>
49 #include <linux/if_vlan.h>
50 #include <linux/ip.h>
51 #endif /* FEATURE_TSO */
52 
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
54 
55 #define qdf_nbuf_users_inc atomic_inc
56 #define qdf_nbuf_users_dec atomic_dec
57 #define qdf_nbuf_users_set atomic_set
58 #define qdf_nbuf_users_read atomic_read
59 #else
60 #define qdf_nbuf_users_inc refcount_inc
61 #define qdf_nbuf_users_dec refcount_dec
62 #define qdf_nbuf_users_set refcount_set
63 #define qdf_nbuf_users_read refcount_read
64 #endif /* KERNEL_VERSION(4, 13, 0) */
65 
66 #define IEEE80211_RADIOTAP_VHT_BW_20	0
67 #define IEEE80211_RADIOTAP_VHT_BW_40	1
68 #define IEEE80211_RADIOTAP_VHT_BW_80	2
69 #define IEEE80211_RADIOTAP_VHT_BW_160	3
70 
71 #define RADIOTAP_VHT_BW_20	0
72 #define RADIOTAP_VHT_BW_40	1
73 #define RADIOTAP_VHT_BW_80	4
74 #define RADIOTAP_VHT_BW_160	11
75 
76 /* tx status */
77 #define RADIOTAP_TX_STATUS_FAIL		1
78 #define RADIOTAP_TX_STATUS_NOACK	2
79 
80 /* channel number to freq conversion */
81 #define CHANNEL_NUM_14 14
82 #define CHANNEL_NUM_15 15
83 #define CHANNEL_NUM_27 27
84 #define CHANNEL_NUM_35 35
85 #define CHANNEL_NUM_182 182
86 #define CHANNEL_NUM_197 197
87 #define CHANNEL_FREQ_2484 2484
88 #define CHANNEL_FREQ_2407 2407
89 #define CHANNEL_FREQ_2512 2512
90 #define CHANNEL_FREQ_5000 5000
91 #define CHANNEL_FREQ_4000 4000
92 #define CHANNEL_FREQ_5150 5150
93 #define FREQ_MULTIPLIER_CONST_5MHZ 5
94 #define FREQ_MULTIPLIER_CONST_20MHZ 20
95 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
96 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
97 #define RADIOTAP_CCK_CHANNEL 0x0020
98 #define RADIOTAP_OFDM_CHANNEL 0x0040
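
/*
 * Illustrative sketch (not part of this file): the channel-to-frequency
 * mapping these constants support. The helper name and exact branching
 * are assumptions; the radiotap update path later in this file performs
 * the equivalent conversion.
 *
 *	static uint16_t example_chan_to_freq(uint8_t chan)
 *	{
 *		if (chan == CHANNEL_NUM_14)
 *			return CHANNEL_FREQ_2484;
 *		if (chan < CHANNEL_NUM_14)
 *			return CHANNEL_FREQ_2407 +
 *				chan * FREQ_MULTIPLIER_CONST_5MHZ;
 *		return CHANNEL_FREQ_5000 +
 *			chan * FREQ_MULTIPLIER_CONST_5MHZ;
 *	}
 */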
99 
100 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
101 #include <qdf_mc_timer.h>
102 
103 struct qdf_track_timer {
104 	qdf_mc_timer_t track_timer;
105 	qdf_atomic_t alloc_fail_cnt;
106 };
107 
108 static struct qdf_track_timer alloc_track_timer;
109 
110 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
111 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
112 #endif
113 
114 #ifdef NBUF_MEMORY_DEBUG
115 /* SMMU crash indication */
116 static qdf_atomic_t smmu_crashed;
117 /* Number of nbufs not added to history */
118 unsigned long g_histroy_add_drop;
119 #endif
120 
121 /* Packet Counter */
122 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
123 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
124 #ifdef QDF_NBUF_GLOBAL_COUNT
125 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
126 static qdf_atomic_t nbuf_count;
127 #endif
128 
129 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
130 static bool is_initial_mem_debug_disabled;
131 #endif
132 
133 /**
134  * __qdf_nbuf_get_ip_offset() - Get IPv4/IPv6 header offset
135  * @data: Pointer to network data buffer
136  *
137  * Get the IP header offset, accounting for any 802.1Q or 802.1AD
138  * tag present in the L2 header.
139  *
140  * Return: IP header offset
141  */
142 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
143 {
144 	uint16_t ether_type;
145 
146 	ether_type = *(uint16_t *)(data +
147 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
148 
149 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
150 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
151 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
152 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
153 
154 	return QDF_NBUF_TRAC_IP_OFFSET;
155 }
156 
157 /**
158  * __qdf_nbuf_get_ether_type() - Get the ether type
159  * @data: Pointer to network data buffer
160  *
161  * Get the ether type, skipping over any 802.1Q or 802.1AD tag
162  * present in the L2 header. The value is returned in network byte
163  * order, e.g. for IPv4 data (ether type 0x0800) the return is 0x0008.
164  *
165  * Return: ether type
166  */
167 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
168 {
169 	uint16_t ether_type;
170 
171 	ether_type = *(uint16_t *)(data +
172 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
173 
174 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
175 		ether_type = *(uint16_t *)(data +
176 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
177 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
178 		ether_type = *(uint16_t *)(data +
179 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
180 
181 	return ether_type;
182 }
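
/*
 * Illustrative sketch (not part of this file): the two helpers above are
 * typically used together to reach the IP header regardless of VLAN
 * tagging; the same pattern appears in the DHCP classifier below.
 *
 *	if (__qdf_nbuf_get_ether_type(data) ==
 *	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
 *		struct iphdr *iph = (struct iphdr *)
 *			(data + __qdf_nbuf_get_ip_offset(data));
 *		...
 *	}
 */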
183 
184 /**
185  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
186  *
187  * Return: none
188  */
189 void qdf_nbuf_tx_desc_count_display(void)
190 {
191 	qdf_debug("Current Snapshot of the Driver:");
192 	qdf_debug("Data Packets:");
193 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
194 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
195 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
196 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
198 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
204 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
205 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
207 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
211 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
212 	qdf_debug("Mgmt Packets:");
213 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
214 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
215 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
216 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
227 }
228 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
229 
230 /**
231  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
232  * @packet_type   : packet type, either mgmt or data
233  * @current_state : layer at which the packet is currently present
234  *
235  * Return: none
236  */
237 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
238 			uint8_t current_state)
239 {
240 	switch (packet_type) {
241 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
242 		nbuf_tx_mgmt[current_state]++;
243 		break;
244 	case QDF_NBUF_TX_PKT_DATA_TRACK:
245 		nbuf_tx_data[current_state]++;
246 		break;
247 	default:
248 		break;
249 	}
250 }
251 
252 /**
253  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
254  *
255  * Return: none
256  */
257 void qdf_nbuf_tx_desc_count_clear(void)
258 {
259 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
260 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
261 }
262 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
263 
264 /**
265  * qdf_nbuf_set_state() - Updates the packet state
266  * @nbuf:            network buffer
267  * @current_state :  layer at which the packet currently is
268  *
269  * This function updates the packet state to the layer at which the packet
270  * currently is
271  *
272  * Return: none
273  */
274 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
275 {
276 	/*
277 	 * Only Mgmt, Data Packets are tracked. WMI messages
278 	 * such as scan commands are not tracked
279 	 */
280 	uint8_t packet_type;
281 
282 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
283 
284 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
285 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
286 		return;
287 	}
288 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
289 	qdf_nbuf_tx_desc_count_update(packet_type,
290 					current_state);
291 }
292 qdf_export_symbol(qdf_nbuf_set_state);
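
/*
 * Illustrative sketch (not part of this file): a tx layer records where
 * a tracked packet currently sits, e.g. when a data frame is handed to
 * HTT. The nbuf variable is hypothetical.
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTT);
 */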
293 
294 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
295 /**
296  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
297  *
298  * This function starts the alloc fail replenish timer.
299  *
300  * Return: void
301  */
302 static inline void __qdf_nbuf_start_replenish_timer(void)
303 {
304 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
305 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
306 	    QDF_TIMER_STATE_RUNNING)
307 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
308 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
309 }
310 
311 /**
312  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
313  *
314  * This function stops the alloc fail replenish timer.
315  *
316  * Return: void
317  */
318 static inline void __qdf_nbuf_stop_replenish_timer(void)
319 {
320 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
321 		return;
322 
323 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
324 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
325 	    QDF_TIMER_STATE_RUNNING)
326 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
327 }
328 
329 /**
330  * qdf_replenish_expire_handler - Replenish expire handler
331  *
332  * This function triggers when the alloc fail replenish timer expires.
333  *
334  * Return: void
335  */
336 static void qdf_replenish_expire_handler(void *arg)
337 {
338 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
339 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
340 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
341 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
342 
343 		/* Error handling here */
344 	}
345 }
346 
347 /**
348  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
349  *
350  * This function initializes the nbuf alloc fail replenish timer.
351  *
352  * Return: void
353  */
354 void __qdf_nbuf_init_replenish_timer(void)
355 {
356 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
357 			  qdf_replenish_expire_handler, NULL);
358 }
359 
360 /**
361  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
362  *
363  * This function deinitializes the nbuf alloc fail replenish timer.
364  *
365  * Return: void
366  */
367 void __qdf_nbuf_deinit_replenish_timer(void)
368 {
369 	__qdf_nbuf_stop_replenish_timer();
370 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
371 }
372 
373 void qdf_nbuf_stop_replenish_timer(void)
374 {
375 	__qdf_nbuf_stop_replenish_timer();
376 }
377 #else
378 
379 static inline void __qdf_nbuf_start_replenish_timer(void) {}
380 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
381 void qdf_nbuf_stop_replenish_timer(void)
382 {
383 }
384 #endif
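
/*
 * Illustrative sketch (not part of this file): the replenish timer is
 * initialized once at driver load and torn down at unload, so alloc
 * failure bursts are tracked for the driver's lifetime.
 *
 *	__qdf_nbuf_init_replenish_timer();	// at driver init
 *	...
 *	__qdf_nbuf_deinit_replenish_timer();	// at driver deinit
 */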
385 
386 /* globals do not need to be initialized to NULL/0 */
387 qdf_nbuf_trace_update_t qdf_trace_update_cb;
388 qdf_nbuf_free_t nbuf_free_cb;
389 
390 #ifdef QDF_NBUF_GLOBAL_COUNT
391 
392 /**
393  * __qdf_nbuf_count_get() - get nbuf global count
394  *
395  * Return: nbuf global count
396  */
397 int __qdf_nbuf_count_get(void)
398 {
399 	return qdf_atomic_read(&nbuf_count);
400 }
401 qdf_export_symbol(__qdf_nbuf_count_get);
402 
403 /**
404  * __qdf_nbuf_count_inc() - increment nbuf global count
405  *
406  * @nbuf: sk buff
407  *
408  * Return: void
409  */
410 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
411 {
412 	int num_nbuf = 1;
413 	qdf_nbuf_t ext_list;
414 
415 	if (qdf_likely(is_initial_mem_debug_disabled))
416 		return;
417 
418 	ext_list = qdf_nbuf_get_ext_list(nbuf);
419 
420 	/* Take care to account for frag_list */
421 	while (ext_list) {
422 		++num_nbuf;
423 		ext_list = qdf_nbuf_queue_next(ext_list);
424 	}
425 
426 	qdf_atomic_add(num_nbuf, &nbuf_count);
427 }
428 qdf_export_symbol(__qdf_nbuf_count_inc);
429 
430 /**
431  * __qdf_nbuf_count_dec() - decrement nbuf global count
432  *
433  * @nbuf: sk buff
434  *
435  * Return: void
436  */
437 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
438 {
439 	qdf_nbuf_t ext_list;
440 	int num_nbuf;
441 
442 	if (qdf_likely(is_initial_mem_debug_disabled))
443 		return;
444 
445 	if (qdf_nbuf_get_users(nbuf) > 1)
446 		return;
447 
448 	num_nbuf = 1;
449 
450 	/* Take care to account for frag_list */
451 	ext_list = qdf_nbuf_get_ext_list(nbuf);
452 	while (ext_list) {
453 		if (qdf_nbuf_get_users(ext_list) == 1)
454 			++num_nbuf;
455 		ext_list = qdf_nbuf_queue_next(ext_list);
456 	}
457 
458 	qdf_atomic_sub(num_nbuf, &nbuf_count);
459 }
460 qdf_export_symbol(__qdf_nbuf_count_dec);
461 #endif
462 
463 #ifdef NBUF_FRAG_MEMORY_DEBUG
464 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
465 {
466 	qdf_nbuf_t ext_list;
467 	uint32_t num_nr_frags;
468 	uint32_t total_num_nr_frags;
469 
470 	if (qdf_likely(is_initial_mem_debug_disabled))
471 		return;
472 
473 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
474 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
475 
476 	total_num_nr_frags = num_nr_frags;
477 
478 	/* Take into account the frags attached to frag_list */
479 	ext_list = qdf_nbuf_get_ext_list(nbuf);
480 	while (ext_list) {
481 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
482 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
483 		total_num_nr_frags += num_nr_frags;
484 		ext_list = qdf_nbuf_queue_next(ext_list);
485 	}
486 
487 	qdf_frag_count_inc(total_num_nr_frags);
488 }
489 
490 qdf_export_symbol(qdf_nbuf_frag_count_inc);
491 
492 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
493 {
494 	qdf_nbuf_t ext_list;
495 	uint32_t num_nr_frags;
496 	uint32_t total_num_nr_frags;
497 
498 	if (qdf_likely(is_initial_mem_debug_disabled))
499 		return;
500 
501 	if (qdf_nbuf_get_users(nbuf) > 1)
502 		return;
503 
504 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
505 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
506 
507 	total_num_nr_frags = num_nr_frags;
508 
509 	/* Take into account the frags attached to frag_list */
510 	ext_list = qdf_nbuf_get_ext_list(nbuf);
511 	while (ext_list) {
512 		if (qdf_nbuf_get_users(ext_list) == 1) {
513 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
514 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
515 			total_num_nr_frags += num_nr_frags;
516 		}
517 		ext_list = qdf_nbuf_queue_next(ext_list);
518 	}
519 
520 	qdf_frag_count_dec(total_num_nr_frags);
521 }
522 
523 qdf_export_symbol(qdf_nbuf_frag_count_dec);
524 
525 #endif
526 
527 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
528 	!defined(QCA_WIFI_QCN9000)
529 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
530 				 int align, int prio, const char *func,
531 				 uint32_t line)
532 {
533 	struct sk_buff *skb;
534 	unsigned long offset;
535 	uint32_t lowmem_alloc_tries = 0;
536 
537 	if (align)
538 		size += (align - 1);
539 
540 realloc:
541 	skb = dev_alloc_skb(size);
542 
543 	if (skb)
544 		goto skb_alloc;
545 
546 	skb = pld_nbuf_pre_alloc(size);
547 
548 	if (!skb) {
549 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
550 				size, func, line);
551 		return NULL;
552 	}
553 
554 skb_alloc:
555 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
556 	 * Though we are trying to reserve low memory upfront to prevent this,
557 	 * we sometimes see SKBs allocated from low memory.
558 	 */
559 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
560 		lowmem_alloc_tries++;
561 		if (lowmem_alloc_tries > 100) {
562 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
563 				     size, func, line);
564 			return NULL;
565 		} else {
566 			/* Not freeing to make sure it
567 			 * will not get allocated again
568 			 */
569 			goto realloc;
570 		}
571 	}
572 	memset(skb->cb, 0x0, sizeof(skb->cb));
573 
574 	/*
575 	 * The default is for netbuf fragments to be interpreted
576 	 * as wordstreams rather than bytestreams.
577 	 */
578 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
579 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
580 
581 	/*
582 	 * XXX:how about we reserve first then align
583 	 * Align & make sure that the tail & data are adjusted properly
584 	 */
585 
586 	if (align) {
587 		offset = ((unsigned long)skb->data) % align;
588 		if (offset)
589 			skb_reserve(skb, align - offset);
590 	}
591 
592 	/*
593 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
594 	 * pointer
595 	 */
596 	skb_reserve(skb, reserve);
597 	qdf_nbuf_count_inc(skb);
598 
599 	return skb;
600 }
601 #else
602 
603 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size)
604 {
605 	struct sk_buff *skb;
606 	int flags = GFP_KERNEL;
607 
608 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
609 		flags = GFP_ATOMIC;
610 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
611 		/*
612 		 * Observed that kcompactd burns CPU to assemble order-3 pages.
613 		 * __netdev_alloc_skb() falls back to a 4k page if the high-order
614 		 * allocation fails, so there is no need to try hard here.
615 		 * Let kcompactd rest in peace.
616 		 */
617 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
618 #endif
619 	}
620 
621 	skb = __netdev_alloc_skb(NULL, size, flags);
622 
623 	if (skb)
624 		qdf_nbuf_count_inc(skb);
625 	else
626 		return NULL;
627 
628 	return skb;
629 }
630 
631 qdf_export_symbol(__qdf_nbuf_alloc_simple);
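
/*
 * Illustrative sketch (not part of this file): a minimal allocate/free
 * pair using the simple allocator above and __qdf_nbuf_free() defined
 * later in this file. osdev and the 2048-byte size are hypothetical.
 *
 *	struct sk_buff *skb = __qdf_nbuf_alloc_simple(osdev, 2048);
 *
 *	if (skb) {
 *		... build the frame or hand it off ...
 *		__qdf_nbuf_free(skb);
 *	}
 */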
632 
633 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
634 				 int align, int prio, const char *func,
635 				 uint32_t line)
636 {
637 	struct sk_buff *skb;
638 	unsigned long offset;
639 	int flags = GFP_KERNEL;
640 
641 	if (align)
642 		size += (align - 1);
643 
644 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
645 		flags = GFP_ATOMIC;
646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
647 		/*
648 		 * Observed that kcompactd burns CPU to assemble order-3 pages.
649 		 * __netdev_alloc_skb() falls back to a 4k page if the high-order
650 		 * allocation fails, so there is no need to try hard here.
651 		 * Let kcompactd rest in peace.
652 		 */
653 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
654 #endif
655 	}
656 
657 	skb = __netdev_alloc_skb(NULL, size, flags);
658 
659 	if (skb)
660 		goto skb_alloc;
661 
662 	skb = pld_nbuf_pre_alloc(size);
663 
664 	if (!skb) {
665 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
666 				size, func, line);
667 		__qdf_nbuf_start_replenish_timer();
668 		return NULL;
669 	} else {
670 		__qdf_nbuf_stop_replenish_timer();
671 	}
672 
673 skb_alloc:
674 	memset(skb->cb, 0x0, sizeof(skb->cb));
675 
676 	/*
677 	 * The default is for netbuf fragments to be interpreted
678 	 * as wordstreams rather than bytestreams.
679 	 */
680 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
681 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
682 
683 	/*
684 	 * XXX:how about we reserve first then align
685 	 * Align & make sure that the tail & data are adjusted properly
686 	 */
687 
688 	if (align) {
689 		offset = ((unsigned long)skb->data) % align;
690 		if (offset)
691 			skb_reserve(skb, align - offset);
692 	}
693 
694 	/*
695 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
696 	 * pointer
697 	 */
698 	skb_reserve(skb, reserve);
699 	qdf_nbuf_count_inc(skb);
700 
701 	return skb;
702 }
703 #endif
704 qdf_export_symbol(__qdf_nbuf_alloc);
705 
706 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
707 					  const char *func, uint32_t line)
708 {
709 	qdf_nbuf_t nbuf;
710 	unsigned long offset;
711 
712 	if (align)
713 		size += (align - 1);
714 
715 	nbuf = alloc_skb(size, GFP_ATOMIC);
716 	if (!nbuf)
717 		goto ret_nbuf;
718 
719 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
720 
721 	skb_reserve(nbuf, reserve);
722 
723 	if (align) {
724 		offset = ((unsigned long)nbuf->data) % align;
725 		if (offset)
726 			skb_reserve(nbuf, align - offset);
727 	}
728 
729 	qdf_nbuf_count_inc(nbuf);
730 
731 ret_nbuf:
732 	return nbuf;
733 }
734 
735 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
736 
737 /**
738  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
739  * @skb: Pointer to network buffer
740  *
741  * Return: none
742  */
743 
744 void __qdf_nbuf_free(struct sk_buff *skb)
745 {
746 	if (pld_nbuf_pre_alloc_free(skb))
747 		return;
748 
749 	qdf_nbuf_frag_count_dec(skb);
750 
751 	qdf_nbuf_count_dec(skb);
752 	if (nbuf_free_cb)
753 		nbuf_free_cb(skb);
754 	else
755 		dev_kfree_skb_any(skb);
756 }
757 
758 qdf_export_symbol(__qdf_nbuf_free);
759 
760 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
761 {
762 	qdf_nbuf_t skb_new = NULL;
763 
764 	skb_new = skb_clone(skb, GFP_ATOMIC);
765 	if (skb_new) {
766 		qdf_nbuf_frag_count_inc(skb_new);
767 		qdf_nbuf_count_inc(skb_new);
768 	}
769 	return skb_new;
770 }
771 
772 qdf_export_symbol(__qdf_nbuf_clone);
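
/*
 * Illustrative sketch (not part of this file): a clone shares its data
 * buffer with the original, so the original and the clone are freed
 * independently. orig is hypothetical.
 *
 *	qdf_nbuf_t copy = __qdf_nbuf_clone(orig);
 *
 *	if (copy) {
 *		... use copy ...
 *		__qdf_nbuf_free(copy);
 *	}
 *	__qdf_nbuf_free(orig);
 */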
773 
774 #ifdef NBUF_MEMORY_DEBUG
775 enum qdf_nbuf_event_type {
776 	QDF_NBUF_ALLOC,
777 	QDF_NBUF_ALLOC_CLONE,
778 	QDF_NBUF_ALLOC_COPY,
779 	QDF_NBUF_ALLOC_FAILURE,
780 	QDF_NBUF_FREE,
781 	QDF_NBUF_MAP,
782 	QDF_NBUF_UNMAP,
783 	QDF_NBUF_ALLOC_COPY_EXPAND,
784 };
785 
786 struct qdf_nbuf_event {
787 	qdf_nbuf_t nbuf;
788 	char func[QDF_MEM_FUNC_NAME_SIZE];
789 	uint32_t line;
790 	enum qdf_nbuf_event_type type;
791 	uint64_t timestamp;
792 	qdf_dma_addr_t iova;
793 };
794 
795 #ifndef QDF_NBUF_HISTORY_SIZE
796 #define QDF_NBUF_HISTORY_SIZE 4096
797 #endif
798 static qdf_atomic_t qdf_nbuf_history_index;
799 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
800 
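/*
 * Advance the shared history index without locking: the atomic increment
 * hands each caller a unique slot, the counter is pulled back down by
 * 'size' once it reaches the ring size so it stays bounded, and the
 * modulo maps the running value into [0, size).
 */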
801 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
802 {
803 	int32_t next = qdf_atomic_inc_return(index);
804 
805 	if (next == size)
806 		qdf_atomic_sub(size, index);
807 
808 	return next % size;
809 }
810 
811 static void
812 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
813 		     enum qdf_nbuf_event_type type)
814 {
815 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
816 						   QDF_NBUF_HISTORY_SIZE);
817 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
818 
819 	if (qdf_atomic_read(&smmu_crashed)) {
820 		g_histroy_add_drop++;
821 		return;
822 	}
823 
824 	event->nbuf = nbuf;
825 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
826 	event->line = line;
827 	event->type = type;
828 	event->timestamp = qdf_get_log_timestamp();
829 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP)
830 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
831 	else
832 		event->iova = 0;
833 }
834 
835 void qdf_set_smmu_fault_state(bool smmu_fault_state)
836 {
837 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
838 	if (!smmu_fault_state)
839 		g_histroy_add_drop = 0;
840 }
841 qdf_export_symbol(qdf_set_smmu_fault_state);
842 #endif /* NBUF_MEMORY_DEBUG */
843 
844 #ifdef NBUF_MAP_UNMAP_DEBUG
845 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
846 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
847 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
848 
849 static void qdf_nbuf_map_tracking_init(void)
850 {
851 	qdf_tracker_init(&qdf_nbuf_map_tracker);
852 }
853 
854 static void qdf_nbuf_map_tracking_deinit(void)
855 {
856 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
857 }
858 
859 static QDF_STATUS
860 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
861 {
862 	if (is_initial_mem_debug_disabled)
863 		return QDF_STATUS_SUCCESS;
864 
865 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
866 }
867 
868 static void
869 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
870 {
871 	if (is_initial_mem_debug_disabled)
872 		return;
873 
874 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
875 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
876 }
877 
878 void qdf_nbuf_map_check_for_leaks(void)
879 {
880 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
881 }
882 
883 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
884 			      qdf_nbuf_t buf,
885 			      qdf_dma_dir_t dir,
886 			      const char *func,
887 			      uint32_t line)
888 {
889 	QDF_STATUS status;
890 
891 	status = qdf_nbuf_track_map(buf, func, line);
892 	if (QDF_IS_STATUS_ERROR(status))
893 		return status;
894 
895 	status = __qdf_nbuf_map(osdev, buf, dir);
896 	if (QDF_IS_STATUS_ERROR(status)) {
897 		qdf_nbuf_untrack_map(buf, func, line);
898 	} else {
899 		if (!is_initial_mem_debug_disabled)
900 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
901 		qdf_net_buf_debug_update_map_node(buf, func, line);
902 	}
903 
904 	return status;
905 }
906 
907 qdf_export_symbol(qdf_nbuf_map_debug);
908 
909 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
910 			  qdf_nbuf_t buf,
911 			  qdf_dma_dir_t dir,
912 			  const char *func,
913 			  uint32_t line)
914 {
915 	qdf_nbuf_untrack_map(buf, func, line);
916 	__qdf_nbuf_unmap_single(osdev, buf, dir);
917 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
918 }
919 
920 qdf_export_symbol(qdf_nbuf_unmap_debug);
921 
922 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
923 				     qdf_nbuf_t buf,
924 				     qdf_dma_dir_t dir,
925 				     const char *func,
926 				     uint32_t line)
927 {
928 	QDF_STATUS status;
929 
930 	status = qdf_nbuf_track_map(buf, func, line);
931 	if (QDF_IS_STATUS_ERROR(status))
932 		return status;
933 
934 	status = __qdf_nbuf_map_single(osdev, buf, dir);
935 	if (QDF_IS_STATUS_ERROR(status)) {
936 		qdf_nbuf_untrack_map(buf, func, line);
937 	} else {
938 		if (!is_initial_mem_debug_disabled)
939 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
940 		qdf_net_buf_debug_update_map_node(buf, func, line);
941 	}
942 
943 	return status;
944 }
945 
946 qdf_export_symbol(qdf_nbuf_map_single_debug);
947 
948 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
949 				 qdf_nbuf_t buf,
950 				 qdf_dma_dir_t dir,
951 				 const char *func,
952 				 uint32_t line)
953 {
954 	qdf_nbuf_untrack_map(buf, func, line);
955 	__qdf_nbuf_unmap_single(osdev, buf, dir);
956 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
957 }
958 
959 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
960 
961 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
962 				     qdf_nbuf_t buf,
963 				     qdf_dma_dir_t dir,
964 				     int nbytes,
965 				     const char *func,
966 				     uint32_t line)
967 {
968 	QDF_STATUS status;
969 
970 	status = qdf_nbuf_track_map(buf, func, line);
971 	if (QDF_IS_STATUS_ERROR(status))
972 		return status;
973 
974 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
975 	if (QDF_IS_STATUS_ERROR(status)) {
976 		qdf_nbuf_untrack_map(buf, func, line);
977 	} else {
978 		if (!is_initial_mem_debug_disabled)
979 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
980 		qdf_net_buf_debug_update_map_node(buf, func, line);
981 	}
982 
983 	return status;
984 }
985 
986 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
987 
988 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
989 				 qdf_nbuf_t buf,
990 				 qdf_dma_dir_t dir,
991 				 int nbytes,
992 				 const char *func,
993 				 uint32_t line)
994 {
995 	qdf_nbuf_untrack_map(buf, func, line);
996 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
997 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
998 }
999 
1000 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1001 
1002 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1003 					    qdf_nbuf_t buf,
1004 					    qdf_dma_dir_t dir,
1005 					    int nbytes,
1006 					    const char *func,
1007 					    uint32_t line)
1008 {
1009 	QDF_STATUS status;
1010 
1011 	status = qdf_nbuf_track_map(buf, func, line);
1012 	if (QDF_IS_STATUS_ERROR(status))
1013 		return status;
1014 
1015 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1016 	if (QDF_IS_STATUS_ERROR(status)) {
1017 		qdf_nbuf_untrack_map(buf, func, line);
1018 	} else {
1019 		if (!is_initial_mem_debug_disabled)
1020 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1021 		qdf_net_buf_debug_update_map_node(buf, func, line);
1022 	}
1023 
1024 	return status;
1025 }
1026 
1027 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1028 
1029 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
1030 					qdf_nbuf_t buf,
1031 					qdf_dma_dir_t dir,
1032 					int nbytes,
1033 					const char *func,
1034 					uint32_t line)
1035 {
1036 	qdf_nbuf_untrack_map(buf, func, line);
1037 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
1038 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1039 }
1040 
1041 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1042 
1043 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1044 					      qdf_nbuf_t buf,
1045 					      qdf_dma_addr_t phy_addr,
1046 					      qdf_dma_dir_t dir, int nbytes,
1047 					      const char *func, uint32_t line)
1048 {
1049 	qdf_nbuf_untrack_map(buf, func, line);
1050 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1051 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1052 }
1053 
1054 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1055 
1056 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1057 					     const char *func,
1058 					     uint32_t line)
1059 {
1060 	char map_func[QDF_TRACKER_FUNC_SIZE];
1061 	uint32_t map_line;
1062 
1063 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1064 				&map_func, &map_line))
1065 		return;
1066 
1067 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1068 			   func, line, map_func, map_line);
1069 }
1070 #else
1071 static inline void qdf_nbuf_map_tracking_init(void)
1072 {
1073 }
1074 
1075 static inline void qdf_nbuf_map_tracking_deinit(void)
1076 {
1077 }
1078 
1079 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1080 						    const char *func,
1081 						    uint32_t line)
1082 {
1083 }
1084 #endif /* NBUF_MAP_UNMAP_DEBUG */
1085 
1086 /**
1087  * __qdf_nbuf_map() - map a buffer to local bus address space
1088  * @osdev: OS device
1090  * @skb: Pointer to network buffer
1091  * @dir: Direction
1092  *
1093  * Return: QDF_STATUS
1094  */
1095 #ifdef QDF_OS_DEBUG
1096 QDF_STATUS
1097 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1098 {
1099 	struct skb_shared_info *sh = skb_shinfo(skb);
1100 
1101 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1102 			|| (dir == QDF_DMA_FROM_DEVICE));
1103 
1104 	/*
1105 	 * Assume there's only a single fragment.
1106 	 * To support multiple fragments, it would be necessary to change
1107 	 * qdf_nbuf_t to be a separate object that stores meta-info
1108 	 * (including the bus address for each fragment) and a pointer
1109 	 * to the underlying sk_buff.
1110 	 */
1111 	qdf_assert(sh->nr_frags == 0);
1112 
1113 	return __qdf_nbuf_map_single(osdev, skb, dir);
1114 }
1115 qdf_export_symbol(__qdf_nbuf_map);
1116 
1117 #else
1118 QDF_STATUS
1119 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1120 {
1121 	return __qdf_nbuf_map_single(osdev, skb, dir);
1122 }
1123 qdf_export_symbol(__qdf_nbuf_map);
1124 #endif
1125 /**
1126  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1127  * @osdev: OS device
1128  * @skb: Pointer to network buffer
1129  * @dir: dma direction
1130  *
1131  * Return: none
1132  */
1133 void
1134 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1135 			qdf_dma_dir_t dir)
1136 {
1137 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1138 		   || (dir == QDF_DMA_FROM_DEVICE));
1139 
1140 	/*
1141 	 * Assume there's a single fragment.
1142 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1143 	 */
1144 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1145 }
1146 qdf_export_symbol(__qdf_nbuf_unmap);
1147 
1148 /**
1149  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1150  * @osdev: OS device
1151  * @buf: Pointer to network buffer
1152  * @dir: Direction
1153  *
1154  * Return: QDF_STATUS
1155  */
1156 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1157 QDF_STATUS
1158 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1159 {
1160 	qdf_dma_addr_t paddr;
1161 
1162 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1163 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1164 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1165 	return QDF_STATUS_SUCCESS;
1166 }
1167 qdf_export_symbol(__qdf_nbuf_map_single);
1168 #else
1169 QDF_STATUS
1170 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1171 {
1172 	qdf_dma_addr_t paddr;
1173 
1174 	/* assume that the OS only provides a single fragment */
1175 	QDF_NBUF_CB_PADDR(buf) = paddr =
1176 		dma_map_single(osdev->dev, buf->data,
1177 				skb_end_pointer(buf) - buf->data,
1178 				__qdf_dma_dir_to_os(dir));
1179 	__qdf_record_nbuf_nbytes(
1180 		__qdf_nbuf_get_end_offset(buf), dir, true);
1181 	return dma_mapping_error(osdev->dev, paddr)
1182 		? QDF_STATUS_E_FAILURE
1183 		: QDF_STATUS_SUCCESS;
1184 }
1185 qdf_export_symbol(__qdf_nbuf_map_single);
1186 #endif
1187 /**
1188  * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
1189  * @osdev: OS device
1190  * @buf: Pointer to network buffer
1191  * @dir: Direction
1192  *
1193  * Return: none
1194  */
1195 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1196 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1197 				qdf_dma_dir_t dir)
1198 {
1199 }
1200 #else
1201 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1202 					qdf_dma_dir_t dir)
1203 {
1204 	if (QDF_NBUF_CB_PADDR(buf)) {
1205 		__qdf_record_nbuf_nbytes(
1206 			__qdf_nbuf_get_end_offset(buf), dir, false);
1207 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1208 			skb_end_pointer(buf) - buf->data,
1209 			__qdf_dma_dir_to_os(dir));
1210 	}
1211 }
1212 #endif
1213 qdf_export_symbol(__qdf_nbuf_unmap_single);
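
/*
 * Illustrative sketch (not part of this file): a map/unmap pair using the
 * single-fragment helpers above. osdev and nbuf are hypothetical; the
 * device consumes QDF_NBUF_CB_PADDR(nbuf) between the two calls.
 *
 *	if (__qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return;
 *
 *	... post QDF_NBUF_CB_PADDR(nbuf) to the hardware ...
 *
 *	__qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */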
1214 
1215 /**
1216  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1217  * @skb: Pointer to network buffer
1218  * @cksum: Pointer to checksum value
1219  *
1220  * Return: QDF_STATUS
1221  */
1222 QDF_STATUS
1223 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1224 {
1225 	switch (cksum->l4_result) {
1226 	case QDF_NBUF_RX_CKSUM_NONE:
1227 		skb->ip_summed = CHECKSUM_NONE;
1228 		break;
1229 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1230 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1231 		break;
1232 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1233 		skb->ip_summed = CHECKSUM_PARTIAL;
1234 		skb->csum = cksum->val;
1235 		break;
1236 	default:
1237 		pr_err("Unknown checksum type\n");
1238 		qdf_assert(0);
1239 		return QDF_STATUS_E_NOSUPPORT;
1240 	}
1241 	return QDF_STATUS_SUCCESS;
1242 }
1243 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1244 
1245 /**
1246  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1247  * @skb: Pointer to network buffer
1248  *
1249  * Return: TX checksum value
1250  */
1251 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1252 {
1253 	switch (skb->ip_summed) {
1254 	case CHECKSUM_NONE:
1255 		return QDF_NBUF_TX_CKSUM_NONE;
1256 	case CHECKSUM_PARTIAL:
1257 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1258 	case CHECKSUM_COMPLETE:
1259 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1260 	default:
1261 		return QDF_NBUF_TX_CKSUM_NONE;
1262 	}
1263 }
1264 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1265 
1266 /**
1267  * __qdf_nbuf_get_tid() - get tid
1268  * @skb: Pointer to network buffer
1269  *
1270  * Return: tid
1271  */
1272 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1273 {
1274 	return skb->priority;
1275 }
1276 qdf_export_symbol(__qdf_nbuf_get_tid);
1277 
1278 /**
1279  * __qdf_nbuf_set_tid() - set tid
1280  * @skb: Pointer to network buffer
1281  *
1282  * Return: none
1283  */
1284 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1285 {
1286 	skb->priority = tid;
1287 }
1288 qdf_export_symbol(__qdf_nbuf_set_tid);
1289 
1290 /**
1291  * __qdf_nbuf_get_exemption_type() - get exemption type
1292  * @skb: Pointer to network buffer
1293  *
1294  * Return: exemption type
1295  */
1296 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1297 {
1298 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1299 }
1300 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1301 
1302 /**
1303  * __qdf_nbuf_reg_trace_cb() - register trace callback
1304  * @cb_func_ptr: Pointer to trace callback function
1305  *
1306  * Return: none
1307  */
1308 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1309 {
1310 	qdf_trace_update_cb = cb_func_ptr;
1311 }
1312 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1313 
1314 /**
1315  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1316  *              of DHCP packet.
1317  * @data: Pointer to DHCP packet data buffer
1318  *
1319  * This func. returns the subtype of DHCP packet.
1320  *
1321  * Return: subtype of the DHCP packet.
1322  */
1323 enum qdf_proto_subtype
1324 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1325 {
1326 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1327 
1328 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1329 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1330 					QDF_DHCP_OPTION53_LENGTH)) {
1331 
1332 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1333 		case QDF_DHCP_DISCOVER:
1334 			subtype = QDF_PROTO_DHCP_DISCOVER;
1335 			break;
1336 		case QDF_DHCP_REQUEST:
1337 			subtype = QDF_PROTO_DHCP_REQUEST;
1338 			break;
1339 		case QDF_DHCP_OFFER:
1340 			subtype = QDF_PROTO_DHCP_OFFER;
1341 			break;
1342 		case QDF_DHCP_ACK:
1343 			subtype = QDF_PROTO_DHCP_ACK;
1344 			break;
1345 		case QDF_DHCP_NAK:
1346 			subtype = QDF_PROTO_DHCP_NACK;
1347 			break;
1348 		case QDF_DHCP_RELEASE:
1349 			subtype = QDF_PROTO_DHCP_RELEASE;
1350 			break;
1351 		case QDF_DHCP_INFORM:
1352 			subtype = QDF_PROTO_DHCP_INFORM;
1353 			break;
1354 		case QDF_DHCP_DECLINE:
1355 			subtype = QDF_PROTO_DHCP_DECLINE;
1356 			break;
1357 		default:
1358 			break;
1359 		}
1360 	}
1361 
1362 	return subtype;
1363 }
1364 
1365 /**
1366  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1367  *            of EAPOL packet.
1368  * @data: Pointer to EAPOL packet data buffer
1369  *
1370  * This func. returns the subtype of EAPOL packet.
1371  *
1372  * Return: subtype of the EAPOL packet.
1373  */
1374 enum qdf_proto_subtype
1375 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1376 {
1377 	uint16_t eapol_key_info;
1378 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1379 	uint16_t mask;
1380 
1381 	eapol_key_info = (uint16_t)(*(uint16_t *)
1382 			(data + EAPOL_KEY_INFO_OFFSET));
1383 
1384 	mask = eapol_key_info & EAPOL_MASK;
1385 	switch (mask) {
1386 	case EAPOL_M1_BIT_MASK:
1387 		subtype = QDF_PROTO_EAPOL_M1;
1388 		break;
1389 	case EAPOL_M2_BIT_MASK:
1390 		subtype = QDF_PROTO_EAPOL_M2;
1391 		break;
1392 	case EAPOL_M3_BIT_MASK:
1393 		subtype = QDF_PROTO_EAPOL_M3;
1394 		break;
1395 	case EAPOL_M4_BIT_MASK:
1396 		subtype = QDF_PROTO_EAPOL_M4;
1397 		break;
1398 	default:
1399 		break;
1400 	}
1401 
1402 	return subtype;
1403 }
1404 
1405 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1406 
1407 /**
1408  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1409  *            of ARP packet.
1410  * @data: Pointer to ARP packet data buffer
1411  *
1412  * This func. returns the subtype of ARP packet.
1413  *
1414  * Return: subtype of the ARP packet.
1415  */
1416 enum qdf_proto_subtype
1417 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1418 {
1419 	uint16_t subtype;
1420 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1421 
1422 	subtype = (uint16_t)(*(uint16_t *)
1423 			(data + ARP_SUB_TYPE_OFFSET));
1424 
1425 	switch (QDF_SWAP_U16(subtype)) {
1426 	case ARP_REQUEST:
1427 		proto_subtype = QDF_PROTO_ARP_REQ;
1428 		break;
1429 	case ARP_RESPONSE:
1430 		proto_subtype = QDF_PROTO_ARP_RES;
1431 		break;
1432 	default:
1433 		break;
1434 	}
1435 
1436 	return proto_subtype;
1437 }
1438 
1439 /**
1440  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1441  *            of IPV4 ICMP packet.
1442  * @data: Pointer to IPV4 ICMP packet data buffer
1443  *
1444  * This func. returns the subtype of ICMP packet.
1445  *
1446  * Return: subtype of the ICMP packet.
1447  */
1448 enum qdf_proto_subtype
1449 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1450 {
1451 	uint8_t subtype;
1452 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1453 
1454 	subtype = (uint8_t)(*(uint8_t *)
1455 			(data + ICMP_SUBTYPE_OFFSET));
1456 
1457 	switch (subtype) {
1458 	case ICMP_REQUEST:
1459 		proto_subtype = QDF_PROTO_ICMP_REQ;
1460 		break;
1461 	case ICMP_RESPONSE:
1462 		proto_subtype = QDF_PROTO_ICMP_RES;
1463 		break;
1464 	default:
1465 		break;
1466 	}
1467 
1468 	return proto_subtype;
1469 }
1470 
1471 /**
1472  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1473  *            of IPV6 ICMPV6 packet.
1474  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1475  *
1476  * This func. returns the subtype of ICMPV6 packet.
1477  *
1478  * Return: subtype of the ICMPV6 packet.
1479  */
1480 enum qdf_proto_subtype
1481 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1482 {
1483 	uint8_t subtype;
1484 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1485 
1486 	subtype = (uint8_t)(*(uint8_t *)
1487 			(data + ICMPV6_SUBTYPE_OFFSET));
1488 
1489 	switch (subtype) {
1490 	case ICMPV6_REQUEST:
1491 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1492 		break;
1493 	case ICMPV6_RESPONSE:
1494 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1495 		break;
1496 	case ICMPV6_RS:
1497 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1498 		break;
1499 	case ICMPV6_RA:
1500 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1501 		break;
1502 	case ICMPV6_NS:
1503 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1504 		break;
1505 	case ICMPV6_NA:
1506 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1507 		break;
1508 	default:
1509 		break;
1510 	}
1511 
1512 	return proto_subtype;
1513 }
1514 
1515 /**
1516  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1517  *            of IPV4 packet.
1518  * @data: Pointer to IPV4 packet data buffer
1519  *
1520  * This func. returns the proto type of IPV4 packet.
1521  *
1522  * Return: proto type of IPV4 packet.
1523  */
1524 uint8_t
1525 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1526 {
1527 	uint8_t proto_type;
1528 
1529 	proto_type = (uint8_t)(*(uint8_t *)(data +
1530 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1531 	return proto_type;
1532 }
1533 
1534 /**
1535  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1536  *            of IPV6 packet.
1537  * @data: Pointer to IPV6 packet data buffer
1538  *
1539  * This func. returns the proto type of IPV6 packet.
1540  *
1541  * Return: proto type of IPV6 packet.
1542  */
1543 uint8_t
1544 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1545 {
1546 	uint8_t proto_type;
1547 
1548 	proto_type = (uint8_t)(*(uint8_t *)(data +
1549 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1550 	return proto_type;
1551 }
1552 
1553 /**
1554  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1555  * @data: Pointer to network data
1556  *
1557  * This api is for Tx packets.
1558  *
1559  * Return: true if packet is ipv4 packet
1560  *	   false otherwise
1561  */
1562 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1563 {
1564 	uint16_t ether_type;
1565 
1566 	ether_type = (uint16_t)(*(uint16_t *)(data +
1567 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1568 
1569 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1570 		return true;
1571 	else
1572 		return false;
1573 }
1574 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1575 
1576 /**
1577  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1578  * @data: Pointer to network data buffer
1579  *
1580  * This api is for ipv4 packet.
1581  *
1582  * Return: true if packet is DHCP packet
1583  *	   false otherwise
1584  */
1585 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1586 {
1587 	uint16_t sport;
1588 	uint16_t dport;
1589 	uint8_t ipv4_offset;
1590 	uint8_t ipv4_hdr_len;
1591 	struct iphdr *iphdr;
1592 
1593 	if (__qdf_nbuf_get_ether_type(data) !=
1594 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1595 		return false;
1596 
1597 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1598 	iphdr = (struct iphdr *)(data + ipv4_offset);
1599 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1600 
1601 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1602 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1603 			      sizeof(uint16_t));
1604 
1605 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1606 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1607 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1608 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1609 		return true;
1610 	else
1611 		return false;
1612 }
1613 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
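
/*
 * Illustrative sketch (not part of this file): classify a frame's DHCP
 * subtype by first confirming it is a DHCP packet and then decoding
 * option 53 with the subtype helper defined earlier. data points at the
 * start of the L2 header.
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data)) {
 *		enum qdf_proto_subtype sub =
 *			__qdf_nbuf_data_get_dhcp_subtype(data);
 *		...
 *	}
 */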
1614 
1615 /**
1616  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an EAPOL packet
1617  * @data: Pointer to network data buffer
1618  *
1619  * This api is for ipv4 packet.
1620  *
1621  * Return: true if packet is EAPOL packet
1622  *	   false otherwise.
1623  */
1624 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1625 {
1626 	uint16_t ether_type;
1627 
1628 	ether_type = __qdf_nbuf_get_ether_type(data);
1629 
1630 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1631 		return true;
1632 	else
1633 		return false;
1634 }
1635 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1636 
1637 /**
1638  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1639  * @skb: Pointer to network buffer
1640  *
1641  * This api is for ipv4 packet.
1642  *
1643  * Return: true if packet is WAPI packet
1644  *	   false otherwise.
1645  */
1646 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1647 {
1648 	uint16_t ether_type;
1649 
1650 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1651 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1652 
1653 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1654 		return true;
1655 	else
1656 		return false;
1657 }
1658 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1659 
1660 /**
1661  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is an IGMP packet
1662  * @data: Pointer to network data buffer
1663  *
1664  * This api is for ipv4 packet.
1665  *
1666  * Return: true if packet is igmp packet
1667  *	   false otherwise.
1668  */
1669 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1670 {
1671 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1672 		uint8_t pkt_type;
1673 
1674 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1675 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1676 
1677 		if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
1678 			return true;
1679 	}
1680 	return false;
1681 }
1682 
1683 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
1684 
1685 /**
1686  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is an IGMP packet
1687  * @data: Pointer to network data buffer
1688  *
1689  * This api is for ipv6 packet.
1690  *
1691  * Return: true if packet is igmp packet
1692  *	   false otherwise.
1693  */
1694 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
1695 {
1696 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1697 		uint8_t pkt_type;
1698 		uint8_t next_hdr;
1699 
1700 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1701 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1702 		next_hdr = (uint8_t)(*(uint8_t *)(data +
1703 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
1704 
1705 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1706 			return true;
1707 		if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
1708 		    (next_hdr == QDF_NBUF_TRAC_HOPOPTS_TYPE))
1709 			return true;
1710 	}
1711 	return false;
1712 }
1713 
1714 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
1715 
1716 /**
1717  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1718  * @skb: Pointer to network buffer
1719  *
1720  * This api is for ipv4 packet.
1721  *
1722  * Return: true if packet is tdls packet
1723  *	   false otherwise.
1724  */
1725 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1726 {
1727 	uint16_t ether_type;
1728 
1729 	ether_type = *(uint16_t *)(skb->data +
1730 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1731 
1732 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1733 		return true;
1734 	else
1735 		return false;
1736 }
1737 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1738 
1739 /**
1740  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an ARP packet
1741  * @data: Pointer to network data buffer
1742  *
1743  * This api is for ipv4 packet.
1744  *
1745  * Return: true if packet is ARP packet
1746  *	   false otherwise.
1747  */
1748 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1749 {
1750 	uint16_t ether_type;
1751 
1752 	ether_type = __qdf_nbuf_get_ether_type(data);
1753 
1754 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1755 		return true;
1756 	else
1757 		return false;
1758 }
1759 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1760 
1761 /**
1762  * __qdf_nbuf_data_is_arp_req() - check if skb data is an ARP request
1763  * @data: Pointer to network data buffer
1764  *
1765  * This api is for ipv4 packet.
1766  *
1767  * Return: true if packet is ARP request
1768  *	   false otherwise.
1769  */
1770 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1771 {
1772 	uint16_t op_code;
1773 
1774 	op_code = (uint16_t)(*(uint16_t *)(data +
1775 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1776 
1777 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1778 		return true;
1779 	return false;
1780 }
1781 
1782 /**
1783  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an ARP response
1784  * @data: Pointer to network data buffer
1785  *
1786  * This api is for ipv4 packet.
1787  *
1788  * Return: true if packet is ARP response
1789  *	   false otherwise.
1790  */
1791 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1792 {
1793 	uint16_t op_code;
1794 
1795 	op_code = (uint16_t)(*(uint16_t *)(data +
1796 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1797 
1798 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1799 		return true;
1800 	return false;
1801 }
1802 
1803 /**
1804  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1805  * @data: Pointer to network data buffer
1806  *
1807  * This api is for ipv4 packet.
1808  *
1809  * Return: ARP packet source IP value.
1810  */
1811 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1812 {
1813 	uint32_t src_ip;
1814 
1815 	src_ip = (uint32_t)(*(uint32_t *)(data +
1816 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1817 
1818 	return src_ip;
1819 }
1820 
1821 /**
1822  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1823  * @data: Pointer to network data buffer
1824  *
1825  * This api is for ipv4 packet.
1826  *
1827  * Return: ARP packet target IP value.
1828  */
1829 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1830 {
1831 	uint32_t tgt_ip;
1832 
1833 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1834 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1835 
1836 	return tgt_ip;
1837 }
1838 
1839 /**
1840  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1841  * @data: Pointer to network data buffer
1842  * @len: length to copy
1843  *
1844  * This api is for dns domain name
1845  *
1846  * Return: dns domain name.
1847  */
1848 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1849 {
1850 	uint8_t *domain_name;
1851 
1852 	domain_name = (uint8_t *)
1853 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1854 	return domain_name;
1855 }
1856 
1857 
1858 /**
1859  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1860  * @data: Pointer to network data buffer
1861  *
1862  * This api is for dns query packet.
1863  *
1864  * Return: true if packet is dns query packet.
1865  *	   false otherwise.
1866  */
1867 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1868 {
1869 	uint16_t op_code;
1870 	uint16_t tgt_port;
1871 
1872 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1873 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1874 	/* A standard DNS query always happens on dest port 53. */
1875 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1876 		op_code = (uint16_t)(*(uint16_t *)(data +
1877 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1878 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1879 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1880 			return true;
1881 	}
1882 	return false;
1883 }
1884 
1885 /**
1886  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1887  * @data: Pointer to network data buffer
1888  *
1889  * This api is for dns query response.
1890  *
1891  * Return: true if packet is dns response packet.
1892  *	   false otherwise.
1893  */
1894 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1895 {
1896 	uint16_t op_code;
1897 	uint16_t src_port;
1898 
1899 	src_port = (uint16_t)(*(uint16_t *)(data +
1900 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1901 	/* Standard DNS response always comes on Src Port 53. */
1902 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1903 		op_code = (uint16_t)(*(uint16_t *)(data +
1904 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1905 
1906 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1907 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1908 			return true;
1909 	}
1910 	return false;
1911 }
1912 
1913 /**
1914  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
1915  * @data: Pointer to network data buffer
1916  *
1917  * This api is to check if the packet is tcp fin.
1918  *
1919  * Return: true if packet is tcp fin packet.
1920  *         false otherwise.
1921  */
1922 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
1923 {
1924 	uint8_t op_code;
1925 
1926 	op_code = (uint8_t)(*(uint8_t *)(data +
1927 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1928 
1929 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
1930 		return true;
1931 
1932 	return false;
1933 }
1934 
1935 /**
1936  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
1937  * @data: Pointer to network data buffer
1938  *
1939  * This api is to check if the tcp packet is fin ack.
1940  *
1941  * Return: true if packet is tcp fin ack packet.
1942  *         false otherwise.
1943  */
1944 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
1945 {
1946 	uint8_t op_code;
1947 
1948 	op_code = (uint8_t)(*(uint8_t *)(data +
1949 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1950 
1951 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
1952 		return true;
1953 
1954 	return false;
1955 }
1956 
1957 /**
1958  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1959  * @data: Pointer to network data buffer
1960  *
1961  * This api is for tcp syn packet.
1962  *
1963  * Return: true if packet is tcp syn packet.
1964  *	   false otherwise.
1965  */
1966 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1967 {
1968 	uint8_t op_code;
1969 
1970 	op_code = (uint8_t)(*(uint8_t *)(data +
1971 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1972 
1973 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1974 		return true;
1975 	return false;
1976 }
1977 
1978 /**
1979  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1980  * @data: Pointer to network data buffer
1981  *
1982  * This api is for tcp syn ack packet.
1983  *
1984  * Return: true if packet is tcp syn ack packet.
1985  *	   false otherwise.
1986  */
1987 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1988 {
1989 	uint8_t op_code;
1990 
1991 	op_code = (uint8_t)(*(uint8_t *)(data +
1992 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1993 
1994 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1995 		return true;
1996 	return false;
1997 }
1998 
1999 /**
2000  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
2001  * @data: Pointer to network data buffer
2002  *
2003  * This api is to check if the tcp packet is rst.
2004  *
2005  * Return: true if packet is tcp rst packet.
2006  *         false otherwise.
2007  */
2008 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2009 {
2010 	uint8_t op_code;
2011 
2012 	op_code = (uint8_t)(*(uint8_t *)(data +
2013 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2014 
2015 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2016 		return true;
2017 
2018 	return false;
2019 }
2020 
2021 /**
2022  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
2023  * @data: Pointer to network data buffer
2024  *
2025  * This api is for tcp ack packet.
2026  *
2027  * Return: true if packet is tcp ack packet.
2028  *	   false otherwise.
2029  */
2030 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2031 {
2032 	uint8_t op_code;
2033 
2034 	op_code = (uint8_t)(*(uint8_t *)(data +
2035 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2036 
2037 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2038 		return true;
2039 	return false;
2040 }
2041 
2042 /**
2043  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
2044  * @data: Pointer to network data buffer
2045  *
2046  * This api is for tcp packet.
2047  *
2048  * Return: tcp source port value.
2049  */
2050 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2051 {
2052 	uint16_t src_port;
2053 
2054 	src_port = (uint16_t)(*(uint16_t *)(data +
2055 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2056 
2057 	return src_port;
2058 }
2059 
2060 /**
2061  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
2062  * @data: Pointer to network data buffer
2063  *
2064  * This api is for tcp packet.
2065  *
2066  * Return: tcp destination port value.
2067  */
2068 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2069 {
2070 	uint16_t tgt_port;
2071 
2072 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2073 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2074 
2075 	return tgt_port;
2076 }
2077 
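/*
 * Illustrative usage sketch (not part of this driver): how a caller might
 * combine the data-inspection helpers above to classify a frame. The
 * __qdf_* helpers, qdf_nbuf_data() and qdf_info() are existing APIs; the
 * wrapper name example_classify_pkt() is an assumption made only for this
 * example. The helpers read fixed offsets, so the buffer is expected to
 * hold a linear Ethernet II + IPv4 + UDP/TCP header, and ports are
 * returned in network byte order.
 *
 *	static void example_classify_pkt(qdf_nbuf_t nbuf)
 *	{
 *		uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *		if (__qdf_nbuf_data_is_dns_query(data))
 *			qdf_info("DNS standard query");
 *		else if (__qdf_nbuf_data_is_tcp_syn(data))
 *			qdf_info("TCP SYN, dst port (network order) 0x%x",
 *				 __qdf_nbuf_data_get_tcp_dst_port(data));
 *	}
 */
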
2078 /**
2079  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
2080  * @data: Pointer to network data buffer
2081  *
2082  * This api is for icmpv4 request packet.
2083  *
2084  * Return: true if packet is icmpv4 request
2085  *	   false otherwise.
2086  */
2087 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2088 {
2089 	uint8_t op_code;
2090 
2091 	op_code = (uint8_t)(*(uint8_t *)(data +
2092 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2093 
2094 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2095 		return true;
2096 	return false;
2097 }
2098 
2099 /**
2100  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
2101  * @data: Pointer to network data buffer
2102  *
2103  * This api is for icmpv4 response packet.
2104  *
2105  * Return: true if packet is icmpv4 response
2106  *	   false otherwise.
2107  */
2108 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2109 {
2110 	uint8_t op_code;
2111 
2112 	op_code = (uint8_t)(*(uint8_t *)(data +
2113 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2114 
2115 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2116 		return true;
2117 	return false;
2118 }
2119 
2120 /**
2121  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
2122  * @data: Pointer to network data buffer
2123  *
2124  * This api is for ipv4 packet.
2125  *
2126  * Return: icmpv4 packet source IP value.
2127  */
2128 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2129 {
2130 	uint32_t src_ip;
2131 
2132 	src_ip = (uint32_t)(*(uint32_t *)(data +
2133 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2134 
2135 	return src_ip;
2136 }
2137 
2138 /**
2139  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
2140  * @data: Pointer to network data buffer
2141  *
2142  * This api is for ipv4 packet.
2143  *
2144  * Return: icmpv4 packet target IP value.
2145  */
2146 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2147 {
2148 	uint32_t tgt_ip;
2149 
2150 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2151 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2152 
2153 	return tgt_ip;
2154 }
2155 
2156 
2157 /**
2158  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
2159  * @data: Pointer to IPV6 packet data buffer
2160  *
2161  * This func. checks whether it is an IPV6 packet or not.
2162  *
2163  * Return: TRUE if it is an IPV6 packet
2164  *         FALSE if not
2165  */
2166 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2167 {
2168 	uint16_t ether_type;
2169 
2170 	ether_type = (uint16_t)(*(uint16_t *)(data +
2171 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2172 
2173 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2174 		return true;
2175 	else
2176 		return false;
2177 }
2178 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2179 
2180 /**
2181  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2182  * @data: Pointer to network data buffer
2183  *
2184  * This api is for ipv6 packet.
2185  *
2186  * Return: true if packet is DHCP packet
2187  *	   false otherwise
2188  */
2189 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2190 {
2191 	uint16_t sport;
2192 	uint16_t dport;
2193 	uint8_t ipv6_offset;
2194 
2195 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2196 		return false;
2197 
2198 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2199 	sport = *(uint16_t *)(data + ipv6_offset +
2200 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2201 	dport = *(uint16_t *)(data + ipv6_offset +
2202 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2203 			      sizeof(uint16_t));
2204 
2205 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2206 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2207 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2208 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2209 		return true;
2210 	else
2211 		return false;
2212 }
2213 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2214 
2215 /**
2216  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2217  * @data: Pointer to network data buffer
2218  *
2219  * This api is for ipv6 packet.
2220  *
2221  * Return: true if packet is MDNS packet
2222  *	   false otherwise
2223  */
2224 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2225 {
2226 	uint16_t sport;
2227 	uint16_t dport;
2228 
2229 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2230 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2231 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2232 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2233 					sizeof(uint16_t));
2234 
2235 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2236 	    dport == sport)
2237 		return true;
2238 	else
2239 		return false;
2240 }
2241 
2242 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2243 
2244 /**
2245  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
2246  * @data: Pointer to IPV4 packet data buffer
2247  *
2248  * This func. checks whether it is an IPV4 multicast packet or not.
2249  *
2250  * Return: TRUE if it is an IPV4 multicast packet
2251  *         FALSE if not
2252  */
2253 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2254 {
2255 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2256 		uint32_t *dst_addr =
2257 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2258 
2259 		/*
2260 		 * Check the top nibble of the IPV4 destination address;
2261 		 * 0xE (i.e. 224.0.0.0/4) indicates a multicast address.
2262 		 */
2263 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2264 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2265 			return true;
2266 		else
2267 			return false;
2268 	} else
2269 		return false;
2270 }
2271 
2272 /**
2273  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
2274  * @data: Pointer to IPV6 packet data buffer
2275  *
2276  * This func. checks whether it is an IPV6 multicast packet or not.
2277  *
2278  * Return: TRUE if it is an IPV6 multicast packet
2279  *         FALSE if not
2280  */
2281 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2282 {
2283 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2284 		uint16_t *dst_addr;
2285 
2286 		dst_addr = (uint16_t *)
2287 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2288 
2289 		/*
2290 		 * Check the first 16-bit word of the destination address;
2291 		 * 0xFF00 indicates an IPV6 mcast packet.
2292 		 */
2293 		if (*dst_addr ==
2294 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2295 			return true;
2296 		else
2297 			return false;
2298 	} else
2299 		return false;
2300 }
2301 
2302 /**
2303  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
2304  * @data: Pointer to IPV4 ICMP packet data buffer
2305  *
2306  * This func. checks whether it is an ICMP packet or not.
2307  *
2308  * Return: TRUE if it is an ICMP packet
2309  *         FALSE if not
2310  */
2311 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2312 {
2313 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2314 		uint8_t pkt_type;
2315 
2316 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2317 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2318 
2319 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2320 			return true;
2321 		else
2322 			return false;
2323 	} else
2324 		return false;
2325 }
2326 
2327 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2328 
2329 /**
2330  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
2331  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2332  *
2333  * This func. checks whether it is an ICMPV6 packet or not.
2334  *
2335  * Return: TRUE if it is an ICMPV6 packet
2336  *         FALSE if not
2337  */
2338 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2339 {
2340 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2341 		uint8_t pkt_type;
2342 
2343 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2344 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2345 
2346 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2347 			return true;
2348 		else
2349 			return false;
2350 	} else
2351 		return false;
2352 }
2353 
2354 /**
2355  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
2356  * @data: Pointer to IPV4 UDP packet data buffer
2357  *
2358  * This func. checks whether it is an IPV4 UDP packet or not.
2359  *
2360  * Return: TRUE if it is an IPV4 UDP packet
2361  *         FALSE if not
2362  */
2363 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2364 {
2365 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2366 		uint8_t pkt_type;
2367 
2368 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2369 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2370 
2371 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2372 			return true;
2373 		else
2374 			return false;
2375 	} else
2376 		return false;
2377 }
2378 
2379 /**
2380  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is an IPV4 TCP packet.
2381  * @data: Pointer to IPV4 TCP packet data buffer
2382  *
2383  * This func. checks whether it is an IPV4 TCP packet or not.
2384  *
2385  * Return: TRUE if it is an IPV4 TCP packet
2386  *         FALSE if not
2387  */
2388 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2389 {
2390 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2391 		uint8_t pkt_type;
2392 
2393 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2394 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2395 
2396 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2397 			return true;
2398 		else
2399 			return false;
2400 	} else
2401 		return false;
2402 }
2403 
2404 /**
2405  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is an IPV6 UDP packet.
2406  * @data: Pointer to IPV6 UDP packet data buffer
2407  *
2408  * This func. checks whether it is an IPV6 UDP packet or not.
2409  *
2410  * Return: TRUE if it is an IPV6 UDP packet
2411  *         FALSE if not
2412  */
2413 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2414 {
2415 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2416 		uint8_t pkt_type;
2417 
2418 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2419 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2420 
2421 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2422 			return true;
2423 		else
2424 			return false;
2425 	} else
2426 		return false;
2427 }
2428 
2429 /**
2430  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPV6 TCP packet.
2431  * @data: Pointer to IPV6 TCP packet data buffer
2432  *
2433  * This func. checks whether it is an IPV6 TCP packet or not.
2434  *
2435  * Return: TRUE if it is an IPV6 TCP packet
2436  *         FALSE if not
2437  */
2438 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2439 {
2440 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2441 		uint8_t pkt_type;
2442 
2443 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2444 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2445 
2446 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2447 			return true;
2448 		else
2449 			return false;
2450 	} else
2451 		return false;
2452 }
2453 
2454 /**
2455  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2456  * @nbuf: sk buff
2457  *
2458  * Return: true if packet is broadcast
2459  *	   false otherwise
2460  */
2461 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2462 {
2463 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2464 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2465 }
2466 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2467 
2468 #ifdef NBUF_MEMORY_DEBUG
2469 
2470 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2471 
2472 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2473 static struct kmem_cache *nbuf_tracking_cache;
2474 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2475 static spinlock_t qdf_net_buf_track_free_list_lock;
2476 static uint32_t qdf_net_buf_track_free_list_count;
2477 static uint32_t qdf_net_buf_track_used_list_count;
2478 static uint32_t qdf_net_buf_track_max_used;
2479 static uint32_t qdf_net_buf_track_max_free;
2480 static uint32_t qdf_net_buf_track_max_allocated;
2481 static uint32_t qdf_net_buf_track_fail_count;
2482 
2483 /**
2484  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2485  *
2486  * tracks the max number of network buffers that the wlan driver was tracking
2487  * at any one time.
2488  *
2489  * Return: none
2490  */
2491 static inline void update_max_used(void)
2492 {
2493 	int sum;
2494 
2495 	if (qdf_net_buf_track_max_used <
2496 	    qdf_net_buf_track_used_list_count)
2497 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2498 	sum = qdf_net_buf_track_free_list_count +
2499 		qdf_net_buf_track_used_list_count;
2500 	if (qdf_net_buf_track_max_allocated < sum)
2501 		qdf_net_buf_track_max_allocated = sum;
2502 }
2503 
2504 /**
2505  * update_max_free() - update qdf_net_buf_track_free_list_count
2506  *
2507  * tracks the max number of tracking buffers kept in the freelist.
2508  *
2509  * Return: none
2510  */
2511 static inline void update_max_free(void)
2512 {
2513 	if (qdf_net_buf_track_max_free <
2514 	    qdf_net_buf_track_free_list_count)
2515 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2516 }
2517 
2518 /**
2519  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2520  *
2521  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2522  * This function also adds flexibility to adjust the allocation and freelist
2523  * schemes.
2524  *
2525  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2526  */
2527 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2528 {
2529 	int flags = GFP_KERNEL;
2530 	unsigned long irq_flag;
2531 	QDF_NBUF_TRACK *new_node = NULL;
2532 
2533 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2534 	qdf_net_buf_track_used_list_count++;
2535 	if (qdf_net_buf_track_free_list) {
2536 		new_node = qdf_net_buf_track_free_list;
2537 		qdf_net_buf_track_free_list =
2538 			qdf_net_buf_track_free_list->p_next;
2539 		qdf_net_buf_track_free_list_count--;
2540 	}
2541 	update_max_used();
2542 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2543 
2544 	if (new_node)
2545 		return new_node;
2546 
2547 	if (in_interrupt() || irqs_disabled() || in_atomic())
2548 		flags = GFP_ATOMIC;
2549 
2550 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2551 }
2552 
2553 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2554 #define FREEQ_POOLSIZE 2048
2555 
2556 /**
2557  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2558  *
2559  * Matches calls to qdf_nbuf_track_alloc.
2560  * Either frees the tracking cookie to kernel or an internal
2561  * freelist based on the size of the freelist.
2562  *
2563  * Return: none
2564  */
2565 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2566 {
2567 	unsigned long irq_flag;
2568 
2569 	if (!node)
2570 		return;
2571 
2572 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
2573 	 * only shrink the freelist if it is bigger than twice the number of
2574 	 * nbufs in use. If the driver is stalling in a consistent bursty
2575 	 * fashion, this will keep 3/4 of the allocations from the free list
2576 	 * while also allowing the system to recover memory as less frantic
2577 	 * traffic occurs.
2578 	 */
2579 
2580 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2581 
2582 	qdf_net_buf_track_used_list_count--;
2583 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2584 	   (qdf_net_buf_track_free_list_count >
2585 	    qdf_net_buf_track_used_list_count << 1)) {
2586 		kmem_cache_free(nbuf_tracking_cache, node);
2587 	} else {
2588 		node->p_next = qdf_net_buf_track_free_list;
2589 		qdf_net_buf_track_free_list = node;
2590 		qdf_net_buf_track_free_list_count++;
2591 	}
2592 	update_max_free();
2593 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2594 }
2595 
2596 /**
2597  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2598  *
2599  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2600  * the freelist first makes it performant for the first iperf udp burst
2601  * as well as steady state.
2602  *
2603  * Return: None
2604  */
2605 static void qdf_nbuf_track_prefill(void)
2606 {
2607 	int i;
2608 	QDF_NBUF_TRACK *node, *head;
2609 
2610 	/* prepopulate the freelist */
2611 	head = NULL;
2612 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2613 		node = qdf_nbuf_track_alloc();
2614 		if (!node)
2615 			continue;
2616 		node->p_next = head;
2617 		head = node;
2618 	}
2619 	while (head) {
2620 		node = head->p_next;
2621 		qdf_nbuf_track_free(head);
2622 		head = node;
2623 	}
2624 
2625 	/* prefilled buffers should not count as used */
2626 	qdf_net_buf_track_max_used = 0;
2627 }
2628 
2629 /**
2630  * qdf_nbuf_track_memory_manager_create() - create the manager for nbuf tracking cookies
2631  *
2632  * This initializes the memory manager for the nbuf tracking cookies.  Because
2633  * these cookies are all the same size and only used in this feature, we can
2634  * use a kmem_cache to provide tracking as well as to speed up allocations.
2635  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2636  * features) a freelist is prepopulated here.
2637  *
2638  * Return: None
2639  */
2640 static void qdf_nbuf_track_memory_manager_create(void)
2641 {
2642 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2643 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2644 						sizeof(QDF_NBUF_TRACK),
2645 						0, 0, NULL);
2646 
2647 	qdf_nbuf_track_prefill();
2648 }
2649 
2650 /**
2651  * qdf_nbuf_track_memory_manager_destroy() - destroy the manager for nbuf tracking cookies
2652  *
2653  * Empty the freelist and print out usage statistics when it is no longer
2654  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2655  * any nbuf tracking cookies were leaked.
2656  *
2657  * Return: None
2658  */
2659 static void qdf_nbuf_track_memory_manager_destroy(void)
2660 {
2661 	QDF_NBUF_TRACK *node, *tmp;
2662 	unsigned long irq_flag;
2663 
2664 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2665 	node = qdf_net_buf_track_free_list;
2666 
2667 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2668 		qdf_print("%s: unexpectedly large max_used count %d",
2669 			  __func__, qdf_net_buf_track_max_used);
2670 
2671 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2672 		qdf_print("%s: %d unused trackers were allocated",
2673 			  __func__,
2674 			  qdf_net_buf_track_max_allocated -
2675 			  qdf_net_buf_track_max_used);
2676 
2677 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2678 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2679 		qdf_print("%s: check freelist shrinking functionality",
2680 			  __func__);
2681 
2682 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2683 		  "%s: %d residual freelist size",
2684 		  __func__, qdf_net_buf_track_free_list_count);
2685 
2686 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2687 		  "%s: %d max freelist size observed",
2688 		  __func__, qdf_net_buf_track_max_free);
2689 
2690 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2691 		  "%s: %d max buffers used observed",
2692 		  __func__, qdf_net_buf_track_max_used);
2693 
2694 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2695 		  "%s: %d max buffers allocated observed",
2696 		  __func__, qdf_net_buf_track_max_allocated);
2697 
2698 	while (node) {
2699 		tmp = node;
2700 		node = node->p_next;
2701 		kmem_cache_free(nbuf_tracking_cache, tmp);
2702 		qdf_net_buf_track_free_list_count--;
2703 	}
2704 
2705 	if (qdf_net_buf_track_free_list_count != 0)
2706 		qdf_info("%d unfreed tracking memory lost in freelist",
2707 			 qdf_net_buf_track_free_list_count);
2708 
2709 	if (qdf_net_buf_track_used_list_count != 0)
2710 		qdf_info("%d unfreed tracking memory still in use",
2711 			 qdf_net_buf_track_used_list_count);
2712 
2713 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2714 	kmem_cache_destroy(nbuf_tracking_cache);
2715 	qdf_net_buf_track_free_list = NULL;
2716 }
2717 
2718 /**
2719  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2720  *
2721  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2722  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2723  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2724  * supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
2725  * reported as a memory leak.
2726  *
2727  * Return: none
2728  */
2729 void qdf_net_buf_debug_init(void)
2730 {
2731 	uint32_t i;
2732 
2733 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2734 
2735 	if (is_initial_mem_debug_disabled)
2736 		return;
2737 
2738 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2739 
2740 	qdf_nbuf_map_tracking_init();
2741 	qdf_nbuf_track_memory_manager_create();
2742 
2743 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2744 		gp_qdf_net_buf_track_tbl[i] = NULL;
2745 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2746 	}
2747 }
2748 qdf_export_symbol(qdf_net_buf_debug_init);
2749 
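/*
 * Illustrative lifecycle sketch (assumption, not code from this file): how
 * the tracking facility is meant to be used. The example_* function names
 * are placeholders; qdf_net_buf_debug_init()/qdf_net_buf_debug_exit(),
 * qdf_net_buf_debug_release_skb() and netif_rx() are real APIs. Allocations
 * made through the qdf_nbuf_alloc() wrappers are added to the hash table,
 * frees through qdf_nbuf_free() remove them, and any SKB handed to the
 * network stack must first be released from tracking.
 *
 *	void example_driver_load(void)
 *	{
 *		qdf_net_buf_debug_init();
 *	}
 *
 *	void example_rx_deliver(qdf_nbuf_t nbuf)
 *	{
 *		qdf_net_buf_debug_release_skb(nbuf);
 *		netif_rx(nbuf);
 *	}
 *
 *	void example_driver_unload(void)
 *	{
 *		qdf_net_buf_debug_exit();
 *	}
 */
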
2750 /**
2751  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2752  *
2753  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2754  * As part of exiting the functionality, free the leaked memory and
2755  * cleanup the tracking buffers.
2756  *
2757  * Return: none
2758  */
2759 void qdf_net_buf_debug_exit(void)
2760 {
2761 	uint32_t i;
2762 	uint32_t count = 0;
2763 	unsigned long irq_flag;
2764 	QDF_NBUF_TRACK *p_node;
2765 	QDF_NBUF_TRACK *p_prev;
2766 
2767 	if (is_initial_mem_debug_disabled)
2768 		return;
2769 
2770 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2771 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2772 		p_node = gp_qdf_net_buf_track_tbl[i];
2773 		while (p_node) {
2774 			p_prev = p_node;
2775 			p_node = p_node->p_next;
2776 			count++;
2777 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2778 				 p_prev->func_name, p_prev->line_num,
2779 				 p_prev->size, p_prev->net_buf);
2780 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2781 				 p_prev->map_func_name,
2782 				 p_prev->map_line_num,
2783 				 p_prev->unmap_func_name,
2784 				 p_prev->unmap_line_num,
2785 				 p_prev->is_nbuf_mapped);
2786 			qdf_nbuf_track_free(p_prev);
2787 		}
2788 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2789 	}
2790 
2791 	qdf_nbuf_track_memory_manager_destroy();
2792 	qdf_nbuf_map_tracking_deinit();
2793 
2794 #ifdef CONFIG_HALT_KMEMLEAK
2795 	if (count) {
2796 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2797 		QDF_BUG(0);
2798 	}
2799 #endif
2800 }
2801 qdf_export_symbol(qdf_net_buf_debug_exit);
2802 
2803 /**
2804  * qdf_net_buf_debug_hash() - hash network buffer pointer
2805  *
2806  * Return: hash value
2807  */
2808 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2809 {
2810 	uint32_t i;
2811 
2812 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2813 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2814 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2815 
2816 	return i;
2817 }
2818 
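/*
 * Worked example (pointer value assumed purely for illustration): for a
 * net_buf at address 0xffff888012345670, the bucket index is
 * ((0x...345670 >> 4) + (0x...345670 >> 14)) &
 * (QDF_NET_BUF_TRACK_MAX_SIZE - 1). Shifting by 4 first drops the low bits
 * that are typically identical for all skbs due to allocator alignment, and
 * folding in a second, further-shifted copy of the pointer spreads nearby
 * allocations across buckets.
 */
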
2819 /**
2820  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2821  *
2822  * Return: pointer to the tracking node if the skb is found in the hash table,
2823  *	else %NULL
2824  */
2825 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2826 {
2827 	uint32_t i;
2828 	QDF_NBUF_TRACK *p_node;
2829 
2830 	i = qdf_net_buf_debug_hash(net_buf);
2831 	p_node = gp_qdf_net_buf_track_tbl[i];
2832 
2833 	while (p_node) {
2834 		if (p_node->net_buf == net_buf)
2835 			return p_node;
2836 		p_node = p_node->p_next;
2837 	}
2838 
2839 	return NULL;
2840 }
2841 
2842 /**
2843  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2844  *
2845  * Return: none
2846  */
2847 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2848 				const char *func_name, uint32_t line_num)
2849 {
2850 	uint32_t i;
2851 	unsigned long irq_flag;
2852 	QDF_NBUF_TRACK *p_node;
2853 	QDF_NBUF_TRACK *new_node;
2854 
2855 	if (is_initial_mem_debug_disabled)
2856 		return;
2857 
2858 	new_node = qdf_nbuf_track_alloc();
2859 
2860 	i = qdf_net_buf_debug_hash(net_buf);
2861 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2862 
2863 	p_node = qdf_net_buf_debug_look_up(net_buf);
2864 
2865 	if (p_node) {
2866 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2867 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2868 			  net_buf, func_name, line_num);
2869 		qdf_nbuf_track_free(new_node);
2870 	} else {
2871 		p_node = new_node;
2872 		if (p_node) {
2873 			p_node->net_buf = net_buf;
2874 			qdf_str_lcopy(p_node->func_name, func_name,
2875 				      QDF_MEM_FUNC_NAME_SIZE);
2876 			p_node->line_num = line_num;
2877 			p_node->is_nbuf_mapped = false;
2878 			p_node->map_line_num = 0;
2879 			p_node->unmap_line_num = 0;
2880 			p_node->map_func_name[0] = '\0';
2881 			p_node->unmap_func_name[0] = '\0';
2882 			p_node->size = size;
2883 			p_node->time = qdf_get_log_timestamp();
2884 			qdf_mem_skb_inc(size);
2885 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2886 			gp_qdf_net_buf_track_tbl[i] = p_node;
2887 		} else {
2888 			qdf_net_buf_track_fail_count++;
2889 			qdf_print(
2890 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2891 				  func_name, line_num, size);
2892 		}
2893 	}
2894 
2895 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2896 }
2897 qdf_export_symbol(qdf_net_buf_debug_add_node);
2898 
2899 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2900 				   uint32_t line_num)
2901 {
2902 	uint32_t i;
2903 	unsigned long irq_flag;
2904 	QDF_NBUF_TRACK *p_node;
2905 
2906 	if (is_initial_mem_debug_disabled)
2907 		return;
2908 
2909 	i = qdf_net_buf_debug_hash(net_buf);
2910 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2911 
2912 	p_node = qdf_net_buf_debug_look_up(net_buf);
2913 
2914 	if (p_node) {
2915 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2916 			      QDF_MEM_FUNC_NAME_SIZE);
2917 		p_node->line_num = line_num;
2918 	}
2919 
2920 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2921 }
2922 
2923 qdf_export_symbol(qdf_net_buf_debug_update_node);
2924 
2925 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2926 				       const char *func_name,
2927 				       uint32_t line_num)
2928 {
2929 	uint32_t i;
2930 	unsigned long irq_flag;
2931 	QDF_NBUF_TRACK *p_node;
2932 
2933 	if (is_initial_mem_debug_disabled)
2934 		return;
2935 
2936 	i = qdf_net_buf_debug_hash(net_buf);
2937 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2938 
2939 	p_node = qdf_net_buf_debug_look_up(net_buf);
2940 
2941 	if (p_node) {
2942 		qdf_str_lcopy(p_node->map_func_name, func_name,
2943 			      QDF_MEM_FUNC_NAME_SIZE);
2944 		p_node->map_line_num = line_num;
2945 		p_node->is_nbuf_mapped = true;
2946 	}
2947 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2948 }
2949 
2950 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
2951 					 const char *func_name,
2952 					 uint32_t line_num)
2953 {
2954 	uint32_t i;
2955 	unsigned long irq_flag;
2956 	QDF_NBUF_TRACK *p_node;
2957 
2958 	if (is_initial_mem_debug_disabled)
2959 		return;
2960 
2961 	i = qdf_net_buf_debug_hash(net_buf);
2962 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2963 
2964 	p_node = qdf_net_buf_debug_look_up(net_buf);
2965 
2966 	if (p_node) {
2967 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
2968 			      QDF_MEM_FUNC_NAME_SIZE);
2969 		p_node->unmap_line_num = line_num;
2970 		p_node->is_nbuf_mapped = false;
2971 	}
2972 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2973 }
2974 
2975 /**
2976  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2977  *
2978  * Return: none
2979  */
2980 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2981 {
2982 	uint32_t i;
2983 	QDF_NBUF_TRACK *p_head;
2984 	QDF_NBUF_TRACK *p_node = NULL;
2985 	unsigned long irq_flag;
2986 	QDF_NBUF_TRACK *p_prev;
2987 
2988 	if (is_initial_mem_debug_disabled)
2989 		return;
2990 
2991 	i = qdf_net_buf_debug_hash(net_buf);
2992 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2993 
2994 	p_head = gp_qdf_net_buf_track_tbl[i];
2995 
2996 	/* Unallocated SKB */
2997 	if (!p_head)
2998 		goto done;
2999 
3000 	p_node = p_head;
3001 	/* Found at head of the table */
3002 	if (p_head->net_buf == net_buf) {
3003 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3004 		goto done;
3005 	}
3006 
3007 	/* Search in collision list */
3008 	while (p_node) {
3009 		p_prev = p_node;
3010 		p_node = p_node->p_next;
3011 		if ((p_node) && (p_node->net_buf == net_buf)) {
3012 			p_prev->p_next = p_node->p_next;
3013 			break;
3014 		}
3015 	}
3016 
3017 done:
3018 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3019 
3020 	if (p_node) {
3021 		qdf_mem_skb_dec(p_node->size);
3022 		qdf_nbuf_track_free(p_node);
3023 	} else {
3024 		if (qdf_net_buf_track_fail_count) {
3025 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3026 				  net_buf, qdf_net_buf_track_fail_count);
3027 		} else
3028 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3029 					   net_buf);
3030 	}
3031 }
3032 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3033 
3034 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3035 				   const char *func_name, uint32_t line_num)
3036 {
3037 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3038 
3039 	if (is_initial_mem_debug_disabled)
3040 		return;
3041 
3042 	while (ext_list) {
3043 		/*
3044 		 * Take care to add if it is Jumbo packet connected using
3045 		 * frag_list
3046 		 */
3047 		qdf_nbuf_t next;
3048 
3049 		next = qdf_nbuf_queue_next(ext_list);
3050 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3051 		ext_list = next;
3052 	}
3053 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3054 }
3055 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3056 
3057 /**
3058  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
3059  * @net_buf: Network buf holding head segment (single)
3060  *
3061  * WLAN driver modules whose allocated SKBs are freed by the network stack are
3062  * supposed to call this API before returning the SKB to the network stack so
3063  * that the SKB is not reported as a memory leak.
3064  *
3065  * Return: none
3066  */
3067 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3068 {
3069 	qdf_nbuf_t ext_list;
3070 
3071 	if (is_initial_mem_debug_disabled)
3072 		return;
3073 
3074 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3075 	while (ext_list) {
3076 		/*
3077 		 * Take care to free if it is Jumbo packet connected using
3078 		 * frag_list
3079 		 */
3080 		qdf_nbuf_t next;
3081 
3082 		next = qdf_nbuf_queue_next(ext_list);
3083 
3084 		if (qdf_nbuf_get_users(ext_list) > 1) {
3085 			ext_list = next;
3086 			continue;
3087 		}
3088 
3089 		qdf_net_buf_debug_delete_node(ext_list);
3090 		ext_list = next;
3091 	}
3092 
3093 	if (qdf_nbuf_get_users(net_buf) > 1)
3094 		return;
3095 
3096 	qdf_net_buf_debug_delete_node(net_buf);
3097 }
3098 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3099 
3100 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3101 				int reserve, int align, int prio,
3102 				const char *func, uint32_t line)
3103 {
3104 	qdf_nbuf_t nbuf;
3105 
3106 	if (is_initial_mem_debug_disabled)
3107 		return __qdf_nbuf_alloc(osdev, size,
3108 					reserve, align,
3109 					prio, func, line);
3110 
3111 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3112 
3113 	/* Store SKB in internal QDF tracking table */
3114 	if (qdf_likely(nbuf)) {
3115 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3116 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3117 	} else {
3118 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3119 	}
3120 
3121 	return nbuf;
3122 }
3123 qdf_export_symbol(qdf_nbuf_alloc_debug);
3124 
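/*
 * Note (sketch of the expected wrapper, see qdf_nbuf.h for the
 * authoritative definition): callers do not normally invoke
 * qdf_nbuf_alloc_debug() directly. Under NBUF_MEMORY_DEBUG the public
 * qdf_nbuf_alloc() macro is expected to expand roughly as below, so every
 * allocation is recorded together with its call site:
 *
 *	#define qdf_nbuf_alloc(osdev, size, reserve, align, prio) \
 *		qdf_nbuf_alloc_debug(osdev, size, reserve, align, prio, \
 *				     __func__, __LINE__)
 */
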
3125 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3126 					    const char *func, uint32_t line)
3127 {
3128 	qdf_nbuf_t nbuf;
3129 
3130 	if (is_initial_mem_debug_disabled)
3131 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3132 						    line);
3133 
3134 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3135 
3136 	/* Store SKB in internal QDF tracking table */
3137 	if (qdf_likely(nbuf)) {
3138 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3139 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3140 	} else {
3141 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3142 	}
3143 
3144 	return nbuf;
3145 }
3146 
3147 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3148 
3149 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3150 {
3151 	qdf_nbuf_t ext_list;
3152 	qdf_frag_t p_frag;
3153 	uint32_t num_nr_frags;
3154 	uint32_t idx = 0;
3155 
3156 	if (qdf_unlikely(!nbuf))
3157 		return;
3158 
3159 	if (is_initial_mem_debug_disabled)
3160 		goto free_buf;
3161 
3162 	if (qdf_nbuf_get_users(nbuf) > 1)
3163 		goto free_buf;
3164 
3165 	/* Remove SKB from internal QDF tracking table */
3166 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3167 	qdf_net_buf_debug_delete_node(nbuf);
3168 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3169 
3170 	/* Take care to delete the debug entries for frags */
3171 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3172 
3173 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3174 
3175 	while (idx < num_nr_frags) {
3176 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3177 		if (qdf_likely(p_frag))
3178 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3179 		idx++;
3180 	}
3181 
3182 	/*
3183 	 * Take care to update the debug entries for frag_list and also
3184 	 * for the frags attached to frag_list
3185 	 */
3186 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3187 	while (ext_list) {
3188 		if (qdf_nbuf_get_users(ext_list) == 1) {
3189 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3190 			idx = 0;
3191 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3192 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3193 			while (idx < num_nr_frags) {
3194 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3195 				if (qdf_likely(p_frag))
3196 					qdf_frag_debug_refcount_dec(p_frag,
3197 								    func, line);
3198 				idx++;
3199 			}
3200 			qdf_net_buf_debug_delete_node(ext_list);
3201 		}
3202 
3203 		ext_list = qdf_nbuf_queue_next(ext_list);
3204 	}
3205 
3206 free_buf:
3207 	__qdf_nbuf_free(nbuf);
3208 }
3209 qdf_export_symbol(qdf_nbuf_free_debug);
3210 
3211 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3212 {
3213 	uint32_t num_nr_frags;
3214 	uint32_t idx = 0;
3215 	qdf_nbuf_t ext_list;
3216 	qdf_frag_t p_frag;
3217 
3218 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3219 
3220 	if (is_initial_mem_debug_disabled)
3221 		return cloned_buf;
3222 
3223 	if (qdf_unlikely(!cloned_buf))
3224 		return NULL;
3225 
3226 	/* Take care to update the debug entries for frags */
3227 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3228 
3229 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3230 
3231 	while (idx < num_nr_frags) {
3232 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3233 		if (qdf_likely(p_frag))
3234 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3235 		idx++;
3236 	}
3237 
3238 	/* Take care to update debug entries for frags attached to frag_list */
3239 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3240 	while (ext_list) {
3241 		idx = 0;
3242 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3243 
3244 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3245 
3246 		while (idx < num_nr_frags) {
3247 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3248 			if (qdf_likely(p_frag))
3249 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3250 			idx++;
3251 		}
3252 		ext_list = qdf_nbuf_queue_next(ext_list);
3253 	}
3254 
3255 	/* Store SKB in internal QDF tracking table */
3256 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3257 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3258 
3259 	return cloned_buf;
3260 }
3261 qdf_export_symbol(qdf_nbuf_clone_debug);
3262 
3263 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3264 {
3265 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3266 
3267 	if (is_initial_mem_debug_disabled)
3268 		return copied_buf;
3269 
3270 	if (qdf_unlikely(!copied_buf))
3271 		return NULL;
3272 
3273 	/* Store SKB in internal QDF tracking table */
3274 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3275 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3276 
3277 	return copied_buf;
3278 }
3279 qdf_export_symbol(qdf_nbuf_copy_debug);
3280 
3281 qdf_nbuf_t
3282 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3283 			   const char *func, uint32_t line)
3284 {
3285 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3286 
3287 	if (qdf_unlikely(!copied_buf))
3288 		return NULL;
3289 
3290 	if (is_initial_mem_debug_disabled)
3291 		return copied_buf;
3292 
3293 	/* Store SKB in internal QDF tracking table */
3294 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3295 	qdf_nbuf_history_add(copied_buf, func, line,
3296 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3297 
3298 	return copied_buf;
3299 }
3300 
3301 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3302 
3303 qdf_nbuf_t
3304 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3305 		       uint32_t line_num)
3306 {
3307 	qdf_nbuf_t unshared_buf;
3308 	qdf_frag_t p_frag;
3309 	uint32_t num_nr_frags;
3310 	uint32_t idx = 0;
3311 	qdf_nbuf_t ext_list, next;
3312 
3313 	if (is_initial_mem_debug_disabled)
3314 		return __qdf_nbuf_unshare(buf);
3315 
3316 	/* Not a shared buffer, nothing to do */
3317 	if (!qdf_nbuf_is_cloned(buf))
3318 		return buf;
3319 
3320 	if (qdf_nbuf_get_users(buf) > 1)
3321 		goto unshare_buf;
3322 
3323 	/* Take care to delete the debug entries for frags */
3324 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3325 
3326 	while (idx < num_nr_frags) {
3327 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3328 		if (qdf_likely(p_frag))
3329 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3330 						    line_num);
3331 		idx++;
3332 	}
3333 
3334 	qdf_net_buf_debug_delete_node(buf);
3335 
3336 	/* Take care of jumbo packet connected using frag_list and frags */
3337 	ext_list = qdf_nbuf_get_ext_list(buf);
3338 	while (ext_list) {
3339 		idx = 0;
3340 		next = qdf_nbuf_queue_next(ext_list);
3341 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3342 
3343 		if (qdf_nbuf_get_users(ext_list) > 1) {
3344 			ext_list = next;
3345 			continue;
3346 		}
3347 
3348 		while (idx < num_nr_frags) {
3349 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3350 			if (qdf_likely(p_frag))
3351 				qdf_frag_debug_refcount_dec(p_frag, func_name,
3352 							    line_num);
3353 			idx++;
3354 		}
3355 
3356 		qdf_net_buf_debug_delete_node(ext_list);
3357 		ext_list = next;
3358 	}
3359 
3360 unshare_buf:
3361 	unshared_buf = __qdf_nbuf_unshare(buf);
3362 
3363 	if (qdf_likely(unshared_buf))
3364 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3365 					   line_num);
3366 
3367 	return unshared_buf;
3368 }
3369 
3370 qdf_export_symbol(qdf_nbuf_unshare_debug);
3371 
3372 #endif /* NBUF_MEMORY_DEBUG */
3373 
3374 #if defined(FEATURE_TSO)
3375 
3376 /**
3377  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3378  *
3379  * @ethproto: ethernet type of the msdu
3380  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3381  * @l2_len: L2 length for the msdu
3382  * @eit_hdr: pointer to EIT header
3383  * @eit_hdr_len: EIT header length for the msdu
3384  * @eit_hdr_dma_map_addr: dma addr for EIT header
3385  * @tcphdr: pointer to tcp header
3386  * @ipv4_csum_en: ipv4 checksum enable
3387  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3388  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3389  * @ip_id: IP id
3390  * @tcp_seq_num: TCP sequence number
3391  *
3392  * This structure holds the TSO common info that is common
3393  * across all the TCP segments of the jumbo packet.
3394  */
3395 struct qdf_tso_cmn_seg_info_t {
3396 	uint16_t ethproto;
3397 	uint16_t ip_tcp_hdr_len;
3398 	uint16_t l2_len;
3399 	uint8_t *eit_hdr;
3400 	uint32_t eit_hdr_len;
3401 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3402 	struct tcphdr *tcphdr;
3403 	uint16_t ipv4_csum_en;
3404 	uint16_t tcp_ipv4_csum_en;
3405 	uint16_t tcp_ipv6_csum_en;
3406 	uint16_t ip_id;
3407 	uint32_t tcp_seq_num;
3408 };
3409 
3410 /**
3411  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
3412  *
3413  * @skb: network buffer
3414  *
3415  * Return: byte offset (0-7) by which the headers were shifted so that the EIT header ends 8-byte aligned; 0 if already aligned or headroom is insufficient.
3416  */
3417 #ifdef FIX_TXDMA_LIMITATION
3418 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3419 {
3420 	uint32_t eit_hdr_len;
3421 	uint8_t *eit_hdr;
3422 	uint8_t byte_8_align_offset;
3423 
3424 	eit_hdr = skb->data;
3425 	eit_hdr_len = (skb_transport_header(skb)
3426 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3427 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3428 	if (qdf_unlikely(byte_8_align_offset)) {
3429 		TSO_DEBUG("%pK,Len %d %d",
3430 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3431 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3432 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3433 				  __LINE__, skb->head, skb->data,
3434 				 byte_8_align_offset);
3435 			return 0;
3436 		}
3437 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3438 		qdf_mem_move(skb->data,
3439 			     skb->data + byte_8_align_offset,
3440 			     eit_hdr_len);
3441 		skb->len -= byte_8_align_offset;
3442 		skb->mac_header -= byte_8_align_offset;
3443 		skb->network_header -= byte_8_align_offset;
3444 		skb->transport_header -= byte_8_align_offset;
3445 	}
3446 	return byte_8_align_offset;
3447 }
3448 #else
3449 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3450 {
3451 	return 0;
3452 }
3453 #endif
3454 
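/*
 * Worked example for qdf_nbuf_adj_tso_frag() (addresses assumed for
 * illustration): if the EIT (ethernet + IP + TCP) header starts at
 * 0x...1000 and is 0x36 bytes long, it ends at 0x...1036, so
 * byte_8_align_offset = 0x1036 & 0x7 = 6. The headers are pushed 6 bytes
 * into the headroom and moved down, so the header copy now ends at
 * 0x...1030, which is 8-byte aligned, while the TSO payload keeps its
 * original address (read back later as skb->data + eit_hdr_len +
 * byte_8_align_offset in __qdf_nbuf_get_tso_info()).
 */
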
3455 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
3456 void qdf_record_nbuf_nbytes(
3457 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
3458 {
3459 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
3460 }
3461 
3462 qdf_export_symbol(qdf_record_nbuf_nbytes);
3463 
3464 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
3465 
3466 /**
3467  * qdf_nbuf_tso_map_frag() - Map TSO segment
3468  * @osdev: qdf device handle
3469  * @tso_frag_vaddr: addr of tso fragment
3470  * @nbytes: number of bytes
3471  * @dir: direction
3472  *
3473  * Map TSO segment and for MCL record the amount of memory mapped
3474  *
3475  * Return: DMA address of mapped TSO fragment in success and
3476  * NULL in case of DMA mapping failure
3477  */
3478 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
3479 	qdf_device_t osdev, void *tso_frag_vaddr,
3480 	uint32_t nbytes, qdf_dma_dir_t dir)
3481 {
3482 	qdf_dma_addr_t tso_frag_paddr = 0;
3483 
3484 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
3485 					nbytes, __qdf_dma_dir_to_os(dir));
3486 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
3487 		qdf_err("DMA mapping error!");
3488 		qdf_assert_always(0);
3489 		return 0;
3490 	}
3491 	qdf_record_nbuf_nbytes(nbytes, dir, true);
3492 	return tso_frag_paddr;
3493 }
3494 
3495 /**
3496  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
3497  * @osdev: qdf device handle
3498  * @tso_frag_paddr: DMA addr of tso fragment
3499  * @dir: direction
3500  * @nbytes: number of bytes
3501  *
3502  * Unmap TSO segment and for MCL record the amount of memory mapped
3503  *
3504  * Return: None
3505  */
3506 static inline void qdf_nbuf_tso_unmap_frag(
3507 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
3508 	uint32_t nbytes, qdf_dma_dir_t dir)
3509 {
3510 	qdf_record_nbuf_nbytes(nbytes, dir, false);
3511 	dma_unmap_single(osdev->dev, tso_frag_paddr,
3512 			 nbytes, __qdf_dma_dir_to_os(dir));
3513 }
3514 
3515 /**
3516  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3517  * information
3518  * @osdev: qdf device handle
3519  * @skb: skb buffer
3520  * @tso_info: Parameters common to all segments
3521  *
3522  * Get the TSO information that is common across all the TCP
3523  * segments of the jumbo packet
3524  *
3525  * Return: 0 - success 1 - failure
3526  */
3527 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3528 			struct sk_buff *skb,
3529 			struct qdf_tso_cmn_seg_info_t *tso_info)
3530 {
3531 	/* Get ethernet type and ethernet header length */
3532 	tso_info->ethproto = vlan_get_protocol(skb);
3533 
3534 	/* Determine whether this is an IPv4 or IPv6 packet */
3535 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3536 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3537 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3538 
3539 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3540 		tso_info->ipv4_csum_en = 1;
3541 		tso_info->tcp_ipv4_csum_en = 1;
3542 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3543 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3544 				ipv4_hdr->protocol);
3545 			return 1;
3546 		}
3547 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3548 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3549 		tso_info->tcp_ipv6_csum_en = 1;
3550 	} else {
3551 		qdf_err("TSO: ethertype 0x%x is not supported!",
3552 			tso_info->ethproto);
3553 		return 1;
3554 	}
3555 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3556 	tso_info->tcphdr = tcp_hdr(skb);
3557 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3558 	/* get pointer to the ethernet + IP + TCP header and their length */
3559 	tso_info->eit_hdr = skb->data;
3560 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3561 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3562 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
3563 						osdev, tso_info->eit_hdr,
3564 						tso_info->eit_hdr_len,
3565 						QDF_DMA_TO_DEVICE);
3566 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
3567 		return 1;
3568 
3569 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3570 		/* include IPv4 header length for IPV4 (total length) */
3571 		tso_info->ip_tcp_hdr_len =
3572 			tso_info->eit_hdr_len - tso_info->l2_len;
3573 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3574 		/* exclude IPv6 header length for IPv6 (payload length) */
3575 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3576 	}
3577 	/*
3578 	 * The length of the payload (application layer data) is added to
3579 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3580 	 * descriptor.
3581 	 */
3582 
3583 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3584 		tso_info->tcp_seq_num,
3585 		tso_info->eit_hdr_len,
3586 		tso_info->l2_len,
3587 		skb->len);
3588 	return 0;
3589 }
3590 
3591 
3592 /**
3593  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3594  *
3595  * @curr_seg: Segment whose contents are initialized
3596  * @tso_cmn_info: Parameters common to all segments
3597  *
3598  * Return: None
3599  */
3600 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3601 				struct qdf_tso_seg_elem_t *curr_seg,
3602 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3603 {
3604 	/* Initialize the flags to 0 */
3605 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3606 
3607 	/*
3608 	 * The following fields remain the same across all segments of
3609 	 * a jumbo packet
3610 	 */
3611 	curr_seg->seg.tso_flags.tso_enable = 1;
3612 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3613 		tso_cmn_info->ipv4_csum_en;
3614 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3615 		tso_cmn_info->tcp_ipv6_csum_en;
3616 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3617 		tso_cmn_info->tcp_ipv4_csum_en;
3618 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3619 
3620 	/* The following fields change for the segments */
3621 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3622 	tso_cmn_info->ip_id++;
3623 
3624 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3625 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3626 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3627 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3628 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3629 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3630 
3631 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3632 
3633 	/*
3634 	 * First fragment for each segment always contains the ethernet,
3635 	 * IP and TCP header
3636 	 */
3637 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3638 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3639 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3640 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3641 
3642 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3643 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3644 		   tso_cmn_info->eit_hdr_len,
3645 		   curr_seg->seg.tso_flags.tcp_seq_num,
3646 		   curr_seg->seg.total_len);
3647 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3648 }
3649 
3650 /**
3651  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3652  * into segments
3653  * @skb: network buffer to be segmented
3654  * @tso_info: This is the output. The information about the
3655  *           TSO segments will be populated within this.
3656  *
3657  * This function fragments a TCP jumbo packet into smaller
3658  * segments to be transmitted by the driver. It chains the TSO
3659  * segments created into a list.
3660  *
3661  * Return: number of TSO segments
3662  */
3663 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3664 		struct qdf_tso_info_t *tso_info)
3665 {
3666 	/* common across all segments */
3667 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3668 	/* segment specific */
3669 	void *tso_frag_vaddr;
3670 	qdf_dma_addr_t tso_frag_paddr = 0;
3671 	uint32_t num_seg = 0;
3672 	struct qdf_tso_seg_elem_t *curr_seg;
3673 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3674 	skb_frag_t *frag = NULL;
3675 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3676 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3677 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3678 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3679 	int j = 0; /* skb fragment index */
3680 	uint8_t byte_8_align_offset;
3681 
3682 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3683 	total_num_seg = tso_info->tso_num_seg_list;
3684 	curr_seg = tso_info->tso_seg_list;
3685 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3686 
3687 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3688 
3689 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3690 						skb, &tso_cmn_info))) {
3691 		qdf_warn("TSO: error getting common segment info");
3692 		return 0;
3693 	}
3694 
3695 	/* length of the first chunk of data in the skb */
3696 	skb_frag_len = skb_headlen(skb);
3697 
3698 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3699 	/* update the remaining skb fragment length and TSO segment length */
3700 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3701 	skb_proc -= tso_cmn_info.eit_hdr_len;
3702 
3703 	/* get the address to the next tso fragment */
3704 	tso_frag_vaddr = skb->data +
3705 			 tso_cmn_info.eit_hdr_len +
3706 			 byte_8_align_offset;
3707 	/* get the length of the next tso fragment */
3708 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3709 
3710 	if (tso_frag_len != 0) {
3711 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
3712 					osdev, tso_frag_vaddr, tso_frag_len,
3713 					QDF_DMA_TO_DEVICE);
3714 		if (qdf_unlikely(!tso_frag_paddr))
3715 			return 0;
3716 	}
3717 
3718 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3719 		__LINE__, skb_frag_len, tso_frag_len);
3720 	num_seg = tso_info->num_segs;
3721 	tso_info->num_segs = 0;
3722 	tso_info->is_tso = 1;
3723 
3724 	while (num_seg && curr_seg) {
3725 		int i = 1; /* tso fragment index */
3726 		uint8_t more_tso_frags = 1;
3727 
3728 		curr_seg->seg.num_frags = 0;
3729 		tso_info->num_segs++;
3730 		total_num_seg->num_seg.tso_cmn_num_seg++;
3731 
3732 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3733 						 &tso_cmn_info);
3734 
3735 		/* If TCP PSH flag is set, set it in the last or only segment */
3736 		if (num_seg == 1)
3737 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3738 
3739 		if (unlikely(skb_proc == 0))
3740 			return tso_info->num_segs;
3741 
3742 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3743 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3744 		/* frag len is added to ip_len in while loop below*/
3745 
3746 		curr_seg->seg.num_frags++;
3747 
3748 		while (more_tso_frags) {
3749 			if (tso_frag_len != 0) {
3750 				curr_seg->seg.tso_frags[i].vaddr =
3751 					tso_frag_vaddr;
3752 				curr_seg->seg.tso_frags[i].length =
3753 					tso_frag_len;
3754 				curr_seg->seg.total_len += tso_frag_len;
3755 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3756 				curr_seg->seg.num_frags++;
3757 				skb_proc = skb_proc - tso_frag_len;
3758 
3759 				/* increment the TCP sequence number */
3760 
3761 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3762 				curr_seg->seg.tso_frags[i].paddr =
3763 					tso_frag_paddr;
3764 
3765 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
3766 			}
3767 
3768 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3769 					__func__, __LINE__,
3770 					i,
3771 					tso_frag_len,
3772 					curr_seg->seg.total_len,
3773 					curr_seg->seg.tso_frags[i].vaddr);
3774 
3775 			/* if there is no more data left in the skb */
3776 			if (!skb_proc)
3777 				return tso_info->num_segs;
3778 
3779 			/* get the next payload fragment information */
3780 			/* check if there are more fragments in this segment */
3781 			if (tso_frag_len < tso_seg_size) {
3782 				more_tso_frags = 1;
3783 				if (tso_frag_len != 0) {
3784 					tso_seg_size = tso_seg_size -
3785 						tso_frag_len;
3786 					i++;
3787 					if (curr_seg->seg.num_frags ==
3788 								FRAG_NUM_MAX) {
3789 						more_tso_frags = 0;
3790 						/*
3791 						 * reset i and the tso
3792 						 * payload size
3793 						 */
3794 						i = 1;
3795 						tso_seg_size =
3796 							skb_shinfo(skb)->
3797 								gso_size;
3798 					}
3799 				}
3800 			} else {
3801 				more_tso_frags = 0;
3802 				/* reset i and the tso payload size */
3803 				i = 1;
3804 				tso_seg_size = skb_shinfo(skb)->gso_size;
3805 			}
3806 
3807 			/* if the next fragment is contiguous */
3808 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3809 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3810 				skb_frag_len = skb_frag_len - tso_frag_len;
3811 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3812 
3813 			} else { /* the next fragment is not contiguous */
3814 				if (skb_shinfo(skb)->nr_frags == 0) {
3815 					qdf_info("TSO: nr_frags == 0!");
3816 					qdf_assert(0);
3817 					return 0;
3818 				}
3819 				if (j >= skb_shinfo(skb)->nr_frags) {
3820 					qdf_info("TSO: nr_frags %d j %d",
3821 						 skb_shinfo(skb)->nr_frags, j);
3822 					qdf_assert(0);
3823 					return 0;
3824 				}
3825 				frag = &skb_shinfo(skb)->frags[j];
3826 				skb_frag_len = skb_frag_size(frag);
3827 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3828 				tso_frag_vaddr = skb_frag_address_safe(frag);
3829 				j++;
3830 			}
3831 
3832 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3833 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3834 				tso_seg_size);
3835 
3836 			if (!(tso_frag_vaddr)) {
3837 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3838 						__func__);
3839 				return 0;
3840 			}
3841 
3842 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
3843 						osdev, tso_frag_vaddr,
3844 						tso_frag_len,
3845 						QDF_DMA_TO_DEVICE);
3846 			if (qdf_unlikely(!tso_frag_paddr))
3847 				return 0;
3848 		}
3849 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3850 				curr_seg->seg.tso_flags.tcp_seq_num);
3851 		num_seg--;
3852 		/* if TCP FIN flag was set, set it in the last segment */
3853 		if (!num_seg)
3854 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3855 
3856 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3857 		curr_seg = curr_seg->next;
3858 	}
3859 	return tso_info->num_segs;
3860 }
3861 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3862 
3863 /**
3864  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3865  *
3866  * @osdev: qdf device handle
3867  * @tso_seg: TSO segment element to be unmapped
3868  * @is_last_seg: whether this is last tso seg or not
3869  *
3870  * Return: none
3871  */
3872 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3873 			  struct qdf_tso_seg_elem_t *tso_seg,
3874 			  bool is_last_seg)
3875 {
3876 	uint32_t num_frags = 0;
3877 
3878 	if (tso_seg->seg.num_frags > 0)
3879 		num_frags = tso_seg->seg.num_frags - 1;
3880 
	/* Num of frags in a tso seg cannot be less than 2 */
3882 	if (num_frags < 1) {
3883 		/*
3884 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3885 		 * this may happen when qdf_nbuf_get_tso_info failed,
3886 		 * do dma unmap for the 0th frag in this seg.
3887 		 */
3888 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3889 			goto last_seg_free_first_frag;
3890 
3891 		qdf_assert(0);
3892 		qdf_err("ERROR: num of frags in a tso segment is %d",
3893 			(num_frags + 1));
3894 		return;
3895 	}
3896 
3897 	while (num_frags) {
		/* Do dma unmap for all frags of the tso seg except the 0th frag */
3899 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3900 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3901 				num_frags);
3902 			qdf_assert(0);
3903 			return;
3904 		}
3905 		qdf_nbuf_tso_unmap_frag(
3906 			osdev,
3907 			tso_seg->seg.tso_frags[num_frags].paddr,
3908 			tso_seg->seg.tso_frags[num_frags].length,
3909 			QDF_DMA_TO_DEVICE);
3910 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3911 		num_frags--;
3912 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3913 	}
3914 
3915 last_seg_free_first_frag:
3916 	if (is_last_seg) {
		/* Do dma unmap for the tso seg 0th frag */
3918 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3919 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3920 			qdf_assert(0);
3921 			return;
3922 		}
3923 		qdf_nbuf_tso_unmap_frag(osdev,
3924 					tso_seg->seg.tso_frags[0].paddr,
3925 					tso_seg->seg.tso_frags[0].length,
3926 					QDF_DMA_TO_DEVICE);
3927 		tso_seg->seg.tso_frags[0].paddr = 0;
3928 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3929 	}
3930 }
3931 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
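
/*
 * Usage sketch (illustrative only, not part of the driver): a TX
 * completion path that mapped segments via __qdf_nbuf_get_tso_info()
 * would typically walk the segment list and unmap each element,
 * flagging the last one so the shared 0th (EIT header) frag is also
 * unmapped. The tso_info variable below is hypothetical.
 *
 *	struct qdf_tso_seg_elem_t *seg = tso_info.tso_seg_list;
 *
 *	while (seg) {
 *		__qdf_nbuf_unmap_tso_segment(osdev, seg, !seg->next);
 *		seg = seg->next;
 *	}
 */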
3932 
3933 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3934 {
3935 	size_t packet_len;
3936 
3937 	packet_len = skb->len -
3938 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3939 		 tcp_hdrlen(skb));
3940 
3941 	return packet_len;
3942 }
3943 
3944 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
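
/*
 * Worked example (illustrative): for a 1514 byte TCP/IPv4 frame with a
 * 14 byte Ethernet header, 20 byte IP header and 20 byte TCP header,
 * skb_transport_header() - skb_mac_header() is 34 and tcp_hdrlen() is
 * 20, so the function returns 1514 - (34 + 20) = 1460 payload bytes.
 */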
3945 
3946 /**
3947  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3948  * into segments
3949  * @nbuf:   network buffer to be segmented
3950  * @tso_info:  This is the output. The information about the
3951  *      TSO segments will be populated within this.
3952  *
3953  * This function fragments a TCP jumbo packet into smaller
3954  * segments to be transmitted by the driver. It chains the TSO
3955  * segments created into a list.
3956  *
3957  * Return: 0 - success, 1 - failure
3958  */
3959 #ifndef BUILD_X86
3960 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3961 {
3962 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3963 	uint32_t remainder, num_segs = 0;
3964 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3965 	uint8_t frags_per_tso = 0;
3966 	uint32_t skb_frag_len = 0;
3967 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3968 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3969 	skb_frag_t *frag = NULL;
3970 	int j = 0;
3971 	uint32_t temp_num_seg = 0;
3972 
3973 	/* length of the first chunk of data in the skb minus eit header*/
3974 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3975 
3976 	/* Calculate num of segs for skb's first chunk of data*/
3977 	remainder = skb_frag_len % tso_seg_size;
3978 	num_segs = skb_frag_len / tso_seg_size;
3979 	/**
3980 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3981 	 * In that case, one more tso seg is required to accommodate
3982 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then remaining data will be accommodated while doing the calculation
3984 	 * for nr_frags data. Hence, frags_per_tso++.
3985 	 */
3986 	if (remainder) {
3987 		if (!skb_nr_frags)
3988 			num_segs++;
3989 		else
3990 			frags_per_tso++;
3991 	}
3992 
3993 	while (skb_nr_frags) {
3994 		if (j >= skb_shinfo(skb)->nr_frags) {
3995 			qdf_info("TSO: nr_frags %d j %d",
3996 				 skb_shinfo(skb)->nr_frags, j);
3997 			qdf_assert(0);
3998 			return 0;
3999 		}
4000 		/**
4001 		 * Calculate the number of tso seg for nr_frags data:
4002 		 * Get the length of each frag in skb_frag_len, add to
		 * remainder. Get the number of segments by dividing it by
4004 		 * tso_seg_size and calculate the new remainder.
4005 		 * Decrement the nr_frags value and keep
4006 		 * looping all the skb_fragments.
4007 		 */
4008 		frag = &skb_shinfo(skb)->frags[j];
4009 		skb_frag_len = skb_frag_size(frag);
4010 		temp_num_seg = num_segs;
4011 		remainder += skb_frag_len;
4012 		num_segs += remainder / tso_seg_size;
4013 		remainder = remainder % tso_seg_size;
4014 		skb_nr_frags--;
4015 		if (remainder) {
4016 			if (num_segs > temp_num_seg)
4017 				frags_per_tso = 0;
4018 			/**
4019 			 * increment the tso per frags whenever remainder is
4020 			 * positive. If frags_per_tso reaches the (max-1),
4021 			 * [First frags always have EIT header, therefore max-1]
4022 			 * increment the num_segs as no more data can be
4023 			 * accomodated in the curr tso seg. Reset the remainder
4024 			 * and frags per tso and keep looping.
4025 			 */
4026 			frags_per_tso++;
4027 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4028 				num_segs++;
4029 				frags_per_tso = 0;
4030 				remainder = 0;
4031 			}
4032 			/**
4033 			 * If this is the last skb frag and still remainder is
4034 			 * non-zero(frags_per_tso is not reached to the max-1)
4035 			 * then increment the num_segs to take care of the
4036 			 * remaining length.
4037 			 */
4038 			if (!skb_nr_frags && remainder) {
4039 				num_segs++;
4040 				frags_per_tso = 0;
4041 			}
4042 		} else {
4043 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4044 			frags_per_tso = 0;
4045 		}
4046 		j++;
4047 	}
4048 
4049 	return num_segs;
4050 }
4051 #elif !defined(QCA_WIFI_QCN9000)
4052 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4053 {
4054 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4055 	skb_frag_t *frag = NULL;
4056 
4057 	/*
4058 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
4059 	 * region which cannot be accessed by Target
4060 	 */
4061 	if (virt_to_phys(skb->data) < 0x50000040) {
4062 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4063 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4064 				virt_to_phys(skb->data));
4065 		goto fail;
4066 
4067 	}
4068 
4069 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4070 		frag = &skb_shinfo(skb)->frags[i];
4071 
4072 		if (!frag)
4073 			goto fail;
4074 
4075 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4076 			goto fail;
4077 	}
4078 
4079 
4080 	gso_size = skb_shinfo(skb)->gso_size;
4081 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4082 			+ tcp_hdrlen(skb));
4083 	while (tmp_len) {
4084 		num_segs++;
4085 		if (tmp_len > gso_size)
4086 			tmp_len -= gso_size;
4087 		else
4088 			break;
4089 	}
4090 
4091 	return num_segs;
4092 
4093 	/*
4094 	 * Do not free this frame, just do socket level accounting
4095 	 * so that this is not reused.
4096 	 */
4097 fail:
4098 	if (skb->sk)
4099 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4100 
4101 	return 0;
4102 }
4103 #else
4104 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4105 {
4106 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4107 	skb_frag_t *frag = NULL;
4108 
4109 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4110 		frag = &skb_shinfo(skb)->frags[i];
4111 
4112 		if (!frag)
4113 			goto fail;
4114 	}
4115 
4116 	gso_size = skb_shinfo(skb)->gso_size;
4117 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4118 			+ tcp_hdrlen(skb));
4119 	while (tmp_len) {
4120 		num_segs++;
4121 		if (tmp_len > gso_size)
4122 			tmp_len -= gso_size;
4123 		else
4124 			break;
4125 	}
4126 
4127 	return num_segs;
4128 
4129 	/*
4130 	 * Do not free this frame, just do socket level accounting
4131 	 * so that this is not reused.
4132 	 */
4133 fail:
4134 	if (skb->sk)
4135 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4136 
4137 	return 0;
4138 }
4139 #endif
4140 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
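
/*
 * Worked example (illustrative) for the non-BUILD_X86 variant: with
 * gso_size = 1460, an EIT header of 54 bytes, 2974 bytes of linear data
 * (2920 bytes of payload after the header) and one 4380 byte page
 * fragment, the linear part contributes 2920 / 1460 = 2 segments with
 * no remainder, and the fragment contributes 4380 / 1460 = 3 more, so
 * the function returns 5 TSO segments.
 */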
4141 
4142 #endif /* FEATURE_TSO */
4143 
4144 /**
4145  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
4146  *
4147  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
4148  *
4149  * Return: N/A
4150  */
4151 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4152 			  uint32_t *lo, uint32_t *hi)
4153 {
4154 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4155 		*lo = lower_32_bits(dmaaddr);
4156 		*hi = upper_32_bits(dmaaddr);
4157 	} else {
4158 		*lo = dmaaddr;
4159 		*hi = 0;
4160 	}
4161 }
4162 
4163 qdf_export_symbol(__qdf_dmaaddr_to_32s);
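
/*
 * Worked example (illustrative): on a platform with a 64-bit
 * qdf_dma_addr_t, splitting dmaaddr = 0x123456780 yields
 * *lo = 0x23456780 and *hi = 0x1; on a 32-bit dma address the whole
 * value lands in *lo and *hi is set to 0.
 */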
4164 
4165 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4166 {
4167 	qdf_nbuf_users_inc(&skb->users);
4168 	return skb;
4169 }
4170 qdf_export_symbol(__qdf_nbuf_inc_users);
4171 
4172 int __qdf_nbuf_get_users(struct sk_buff *skb)
4173 {
4174 	return qdf_nbuf_users_read(&skb->users);
4175 }
4176 qdf_export_symbol(__qdf_nbuf_get_users);
4177 
4178 /**
4179  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
4180  * @skb: sk_buff handle
4181  *
4182  * Return: none
4183  */
4184 
4185 void __qdf_nbuf_ref(struct sk_buff *skb)
4186 {
4187 	skb_get(skb);
4188 }
4189 qdf_export_symbol(__qdf_nbuf_ref);
4190 
4191 /**
4192  * __qdf_nbuf_shared() - Check whether the buffer is shared
 * @skb: sk_buff buffer
 *
 * Return: true if more than one reference to this buffer is held
4196  */
4197 int __qdf_nbuf_shared(struct sk_buff *skb)
4198 {
4199 	return skb_shared(skb);
4200 }
4201 qdf_export_symbol(__qdf_nbuf_shared);
4202 
4203 /**
4204  * __qdf_nbuf_dmamap_create() - create a DMA map.
4205  * @osdev: qdf device handle
4206  * @dmap: dma map handle
4207  *
 * This can later be used to map networking buffers. They:
4209  * - need space in adf_drv's software descriptor
4210  * - are typically created during adf_drv_create
4211  * - need to be created before any API(qdf_nbuf_map) that uses them
4212  *
4213  * Return: QDF STATUS
4214  */
4215 QDF_STATUS
4216 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4217 {
4218 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4219 	/*
	 * The driver can tell us its SG capability; it must be handled.
	 * Bounce buffers if they are there.
4222 	 */
4223 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4224 	if (!(*dmap))
4225 		error = QDF_STATUS_E_NOMEM;
4226 
4227 	return error;
4228 }
4229 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4230 /**
4231  * __qdf_nbuf_dmamap_destroy() - delete a dma map
4232  * @osdev: qdf device handle
4233  * @dmap: dma map handle
4234  *
4235  * Return: none
4236  */
4237 void
4238 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4239 {
4240 	kfree(dmap);
4241 }
4242 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
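
/*
 * Usage sketch (illustrative only, not part of the driver): callers
 * typically pair the create/destroy calls around the lifetime of their
 * software descriptor.
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	... use the map ...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */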
4243 
4244 /**
4245  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
4246  * @osdev: os device
4247  * @skb: skb handle
4248  * @dir: dma direction
4249  * @nbytes: number of bytes to be mapped
4250  *
4251  * Return: QDF_STATUS
4252  */
4253 #ifdef QDF_OS_DEBUG
4254 QDF_STATUS
4255 __qdf_nbuf_map_nbytes(
4256 	qdf_device_t osdev,
4257 	struct sk_buff *skb,
4258 	qdf_dma_dir_t dir,
4259 	int nbytes)
4260 {
4261 	struct skb_shared_info  *sh = skb_shinfo(skb);
4262 
4263 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4264 
4265 	/*
4266 	 * Assume there's only a single fragment.
4267 	 * To support multiple fragments, it would be necessary to change
4268 	 * adf_nbuf_t to be a separate object that stores meta-info
4269 	 * (including the bus address for each fragment) and a pointer
4270 	 * to the underlying sk_buff.
4271 	 */
4272 	qdf_assert(sh->nr_frags == 0);
4273 
4274 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4275 }
4276 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4277 #else
4278 QDF_STATUS
4279 __qdf_nbuf_map_nbytes(
4280 	qdf_device_t osdev,
4281 	struct sk_buff *skb,
4282 	qdf_dma_dir_t dir,
4283 	int nbytes)
4284 {
4285 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4286 }
4287 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4288 #endif
4289 /**
4290  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4291  * @osdev: OS device
4292  * @skb: skb handle
4293  * @dir: direction
4294  * @nbytes: number of bytes
4295  *
4296  * Return: none
4297  */
4298 void
4299 __qdf_nbuf_unmap_nbytes(
4300 	qdf_device_t osdev,
4301 	struct sk_buff *skb,
4302 	qdf_dma_dir_t dir,
4303 	int nbytes)
4304 {
4305 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4306 
4307 	/*
4308 	 * Assume there's a single fragment.
4309 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4310 	 */
4311 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4312 }
4313 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
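
/*
 * Usage sketch (illustrative only): callers are expected to pair the
 * map and unmap calls with the same direction and length, e.g. for a
 * single-fragment TX buffer:
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, len) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	... hand the buffer to hardware, wait for completion ...
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, len);
 */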
4314 
4315 /**
4316  * __qdf_nbuf_dma_map_info() - return the dma map info
4317  * @bmap: dma map
4318  * @sg: dma map info
4319  *
4320  * Return: none
4321  */
4322 void
4323 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4324 {
4325 	qdf_assert(bmap->mapped);
4326 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4327 
4328 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4329 			sizeof(struct __qdf_segment));
4330 	sg->nsegs = bmap->nsegs;
4331 }
4332 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4333 /**
4334  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4335  *			specified by the index
4336  * @skb: sk buff
4337  * @sg: scatter/gather list of all the frags
4338  *
4339  * Return: none
4340  */
4341 #if defined(__QDF_SUPPORT_FRAG_MEM)
void
__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	qdf_assert(skb);
	sg->sg_segs[0].vaddr = skb->data;
	sg->sg_segs[0].len   = skb->len;
	sg->nsegs            = 1;

	for (i = 1; i <= sh->nr_frags; i++) {
		skb_frag_t    *f        = &sh->frags[i - 1];

		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
			f->page_offset);
		sg->sg_segs[i].len      = f->size;

		qdf_assert(i < QDF_MAX_SGLIST);
	}
	/* one head data segment plus nr_frags page fragments */
	sg->nsegs = i;
}
4362 qdf_export_symbol(__qdf_nbuf_frag_info);
4363 #else
4364 #ifdef QDF_OS_DEBUG
4365 void
4366 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4367 {
4368 
4369 	struct skb_shared_info  *sh = skb_shinfo(skb);
4370 
4371 	qdf_assert(skb);
4372 	sg->sg_segs[0].vaddr = skb->data;
4373 	sg->sg_segs[0].len   = skb->len;
4374 	sg->nsegs            = 1;
4375 
4376 	qdf_assert(sh->nr_frags == 0);
4377 }
4378 qdf_export_symbol(__qdf_nbuf_frag_info);
4379 #else
4380 void
4381 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4382 {
4383 	sg->sg_segs[0].vaddr = skb->data;
4384 	sg->sg_segs[0].len   = skb->len;
4385 	sg->nsegs            = 1;
4386 }
4387 qdf_export_symbol(__qdf_nbuf_frag_info);
4388 #endif
4389 #endif
4390 /**
4391  * __qdf_nbuf_get_frag_size() - get frag size
4392  * @nbuf: sk buffer
4393  * @cur_frag: current frag
4394  *
4395  * Return: frag size
4396  */
4397 uint32_t
4398 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4399 {
4400 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4401 	const skb_frag_t *frag = sh->frags + cur_frag;
4402 
4403 	return skb_frag_size(frag);
4404 }
4405 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4406 
4407 /**
4408  * __qdf_nbuf_frag_map() - dma map frag
4409  * @osdev: os device
4410  * @nbuf: sk buff
4411  * @offset: offset
4412  * @dir: direction
4413  * @cur_frag: current fragment
4414  *
4415  * Return: QDF status
4416  */
4417 #ifdef A_SIMOS_DEVHOST
4418 QDF_STATUS __qdf_nbuf_frag_map(
4419 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4420 	int offset, qdf_dma_dir_t dir, int cur_frag)
4421 {
4422 	int32_t paddr, frag_len;
4423 
4424 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4425 	return QDF_STATUS_SUCCESS;
4426 }
4427 qdf_export_symbol(__qdf_nbuf_frag_map);
4428 #else
4429 QDF_STATUS __qdf_nbuf_frag_map(
4430 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4431 	int offset, qdf_dma_dir_t dir, int cur_frag)
4432 {
4433 	dma_addr_t paddr, frag_len;
4434 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4435 	const skb_frag_t *frag = sh->frags + cur_frag;
4436 
4437 	frag_len = skb_frag_size(frag);
4438 
4439 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4440 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4441 					__qdf_dma_dir_to_os(dir));
4442 	return dma_mapping_error(osdev->dev, paddr) ?
4443 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4444 }
4445 qdf_export_symbol(__qdf_nbuf_frag_map);
4446 #endif
4447 /**
4448  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4449  * @dmap: dma map
4450  * @cb: callback
4451  * @arg: argument
4452  *
4453  * Return: none
4454  */
4455 void
4456 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4457 {
4458 	return;
4459 }
4460 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4461 
4462 
4463 /**
4464  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4465  * @osdev: os device
4466  * @buf: sk buff
4467  * @dir: direction
4468  *
4469  * Return: none
4470  */
4471 #if defined(A_SIMOS_DEVHOST)
4472 static void __qdf_nbuf_sync_single_for_cpu(
4473 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4474 {
4475 	return;
4476 }
4477 #else
4478 static void __qdf_nbuf_sync_single_for_cpu(
4479 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4480 {
4481 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4482 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4483 		return;
4484 	}
4485 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4486 		skb_end_offset(buf) - skb_headroom(buf),
4487 		__qdf_dma_dir_to_os(dir));
4488 }
4489 #endif
4490 /**
4491  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4492  * @osdev: os device
4493  * @skb: sk buff
4494  * @dir: direction
4495  *
4496  * Return: none
4497  */
4498 void
4499 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4500 	struct sk_buff *skb, qdf_dma_dir_t dir)
4501 {
4502 	qdf_assert(
4503 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4504 
4505 	/*
4506 	 * Assume there's a single fragment.
4507 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4508 	 */
4509 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4510 }
4511 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
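
/*
 * Usage sketch (illustrative only): before the CPU inspects data that
 * the device wrote into a mapped RX buffer, ownership of the mapped
 * region is handed back to the CPU:
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	... parse skb->data ...
 */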
4512 
4513 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4514 /**
4515  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4516  * @rx_status: Pointer to rx_status.
4517  * @rtap_buf: Buf to which VHT info has to be updated.
4518  * @rtap_len: Current length of radiotap buffer
4519  *
4520  * Return: Length of radiotap after VHT flags updated.
4521  */
4522 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4523 					struct mon_rx_status *rx_status,
4524 					int8_t *rtap_buf,
4525 					uint32_t rtap_len)
4526 {
4527 	uint16_t vht_flags = 0;
4528 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4529 
4530 	rtap_len = qdf_align(rtap_len, 2);
4531 
4532 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4533 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4534 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4535 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4536 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4537 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4538 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4539 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4540 	rtap_len += 2;
4541 
4542 	rtap_buf[rtap_len] |=
4543 		(rx_status->is_stbc ?
4544 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4545 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4546 		(rx_status->ldpc ?
4547 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4548 		(rx_status->beamformed ?
4549 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4550 	rtap_len += 1;
4551 
4552 	if (!rx_user_status) {
4553 		switch (rx_status->vht_flag_values2) {
4554 		case IEEE80211_RADIOTAP_VHT_BW_20:
4555 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4556 			break;
4557 		case IEEE80211_RADIOTAP_VHT_BW_40:
4558 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4559 			break;
4560 		case IEEE80211_RADIOTAP_VHT_BW_80:
4561 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4562 			break;
4563 		case IEEE80211_RADIOTAP_VHT_BW_160:
4564 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4565 			break;
4566 		}
4567 		rtap_len += 1;
4568 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4569 		rtap_len += 1;
4570 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4571 		rtap_len += 1;
4572 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4573 		rtap_len += 1;
4574 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4575 		rtap_len += 1;
4576 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4577 		rtap_len += 1;
4578 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4579 		rtap_len += 1;
4580 		put_unaligned_le16(rx_status->vht_flag_values6,
4581 				   &rtap_buf[rtap_len]);
4582 		rtap_len += 2;
4583 	} else {
4584 		switch (rx_user_status->vht_flag_values2) {
4585 		case IEEE80211_RADIOTAP_VHT_BW_20:
4586 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4587 			break;
4588 		case IEEE80211_RADIOTAP_VHT_BW_40:
4589 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4590 			break;
4591 		case IEEE80211_RADIOTAP_VHT_BW_80:
4592 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4593 			break;
4594 		case IEEE80211_RADIOTAP_VHT_BW_160:
4595 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4596 			break;
4597 		}
4598 		rtap_len += 1;
4599 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
4600 		rtap_len += 1;
4601 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
4602 		rtap_len += 1;
4603 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
4604 		rtap_len += 1;
4605 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
4606 		rtap_len += 1;
4607 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
4608 		rtap_len += 1;
4609 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
4610 		rtap_len += 1;
4611 		put_unaligned_le16(rx_user_status->vht_flag_values6,
4612 				   &rtap_buf[rtap_len]);
4613 		rtap_len += 2;
4614 	}
4615 
4616 	return rtap_len;
4617 }
4618 
4619 /**
4620  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4621  * @rx_status: Pointer to rx_status.
4622  * @rtap_buf: buffer to which radiotap has to be updated
4623  * @rtap_len: radiotap length
4624  *
 * API updates high-efficiency (11ax) fields in the radiotap header
4626  *
4627  * Return: length of rtap_len updated.
4628  */
4629 static unsigned int
4630 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4631 				     int8_t *rtap_buf, uint32_t rtap_len)
4632 {
4633 	/*
4634 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4635 	 * Enable all "known" HE radiotap flags for now
4636 	 */
4637 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4638 
4639 	rtap_len = qdf_align(rtap_len, 2);
4640 
4641 	if (!rx_user_status) {
4642 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4643 		rtap_len += 2;
4644 
4645 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4646 		rtap_len += 2;
4647 
4648 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4649 		rtap_len += 2;
4650 
4651 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4652 		rtap_len += 2;
4653 
4654 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4655 		rtap_len += 2;
4656 
4657 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4658 		rtap_len += 2;
4659 		qdf_rl_debug("he data %x %x %x %x %x %x",
4660 			     rx_status->he_data1,
4661 			     rx_status->he_data2, rx_status->he_data3,
4662 			     rx_status->he_data4, rx_status->he_data5,
4663 			     rx_status->he_data6);
4664 	} else {
4665 		put_unaligned_le16(rx_user_status->he_data1,
4666 				   &rtap_buf[rtap_len]);
4667 		rtap_len += 2;
4668 
4669 		put_unaligned_le16(rx_user_status->he_data2,
4670 				   &rtap_buf[rtap_len]);
4671 		rtap_len += 2;
4672 
4673 		put_unaligned_le16(rx_user_status->he_data3,
4674 				   &rtap_buf[rtap_len]);
4675 		rtap_len += 2;
4676 
4677 		put_unaligned_le16(rx_user_status->he_data4,
4678 				   &rtap_buf[rtap_len]);
4679 		rtap_len += 2;
4680 
4681 		put_unaligned_le16(rx_user_status->he_data5,
4682 				   &rtap_buf[rtap_len]);
4683 		rtap_len += 2;
4684 
4685 		put_unaligned_le16(rx_user_status->he_data6,
4686 				   &rtap_buf[rtap_len]);
4687 		rtap_len += 2;
4688 		qdf_rl_debug("he data %x %x %x %x %x %x",
4689 			     rx_user_status->he_data1,
4690 			     rx_user_status->he_data2, rx_user_status->he_data3,
4691 			     rx_user_status->he_data4, rx_user_status->he_data5,
4692 			     rx_user_status->he_data6);
4693 	}
4694 
4695 	return rtap_len;
4696 }
4697 
4698 
4699 /**
4700  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4701  * @rx_status: Pointer to rx_status.
4702  * @rtap_buf: buffer to which radiotap has to be updated
4703  * @rtap_len: radiotap length
4704  *
 * API updates HE-MU fields in the radiotap header
4706  *
4707  * Return: length of rtap_len updated.
4708  */
4709 static unsigned int
4710 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4711 				     int8_t *rtap_buf, uint32_t rtap_len)
4712 {
4713 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4714 
4715 	rtap_len = qdf_align(rtap_len, 2);
4716 
4717 	/*
4718 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4719 	 * Enable all "known" he-mu radiotap flags for now
4720 	 */
4721 
4722 	if (!rx_user_status) {
4723 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4724 		rtap_len += 2;
4725 
4726 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4727 		rtap_len += 2;
4728 
4729 		rtap_buf[rtap_len] = rx_status->he_RU[0];
4730 		rtap_len += 1;
4731 
4732 		rtap_buf[rtap_len] = rx_status->he_RU[1];
4733 		rtap_len += 1;
4734 
4735 		rtap_buf[rtap_len] = rx_status->he_RU[2];
4736 		rtap_len += 1;
4737 
4738 		rtap_buf[rtap_len] = rx_status->he_RU[3];
4739 		rtap_len += 1;
4740 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4741 			  rx_status->he_flags1,
4742 			  rx_status->he_flags2, rx_status->he_RU[0],
4743 			  rx_status->he_RU[1], rx_status->he_RU[2],
4744 			  rx_status->he_RU[3]);
4745 	} else {
4746 		put_unaligned_le16(rx_user_status->he_flags1,
4747 				   &rtap_buf[rtap_len]);
4748 		rtap_len += 2;
4749 
4750 		put_unaligned_le16(rx_user_status->he_flags2,
4751 				   &rtap_buf[rtap_len]);
4752 		rtap_len += 2;
4753 
4754 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
4755 		rtap_len += 1;
4756 
4757 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
4758 		rtap_len += 1;
4759 
4760 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
4761 		rtap_len += 1;
4762 
4763 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
4764 		rtap_len += 1;
4765 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4766 			  rx_user_status->he_flags1,
4767 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
4768 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
4769 			  rx_user_status->he_RU[3]);
4770 	}
4771 
4772 	return rtap_len;
4773 }
4774 
4775 /**
4776  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4777  * @rx_status: Pointer to rx_status.
4778  * @rtap_buf: buffer to which radiotap has to be updated
4779  * @rtap_len: radiotap length
4780  *
 * API updates HE-MU-other fields in the radiotap header
4782  *
4783  * Return: length of rtap_len updated.
4784  */
4785 static unsigned int
4786 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4787 				     int8_t *rtap_buf, uint32_t rtap_len)
4788 {
4789 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4790 
4791 	rtap_len = qdf_align(rtap_len, 2);
4792 
4793 	/*
4794 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4795 	 * Enable all "known" he-mu-other radiotap flags for now
4796 	 */
4797 	if (!rx_user_status) {
4798 		put_unaligned_le16(rx_status->he_per_user_1,
4799 				   &rtap_buf[rtap_len]);
4800 		rtap_len += 2;
4801 
4802 		put_unaligned_le16(rx_status->he_per_user_2,
4803 				   &rtap_buf[rtap_len]);
4804 		rtap_len += 2;
4805 
4806 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
4807 		rtap_len += 1;
4808 
4809 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
4810 		rtap_len += 1;
4811 		qdf_debug("he_per_user %x %x pos %x knwn %x",
4812 			  rx_status->he_per_user_1,
4813 			  rx_status->he_per_user_2,
4814 			  rx_status->he_per_user_position,
4815 			  rx_status->he_per_user_known);
4816 	} else {
4817 		put_unaligned_le16(rx_user_status->he_per_user_1,
4818 				   &rtap_buf[rtap_len]);
4819 		rtap_len += 2;
4820 
4821 		put_unaligned_le16(rx_user_status->he_per_user_2,
4822 				   &rtap_buf[rtap_len]);
4823 		rtap_len += 2;
4824 
4825 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
4826 		rtap_len += 1;
4827 
4828 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
4829 		rtap_len += 1;
4830 		qdf_debug("he_per_user %x %x pos %x knwn %x",
4831 			  rx_user_status->he_per_user_1,
4832 			  rx_user_status->he_per_user_2,
4833 			  rx_user_status->he_per_user_position,
4834 			  rx_user_status->he_per_user_known);
4835 	}
4836 
4837 	return rtap_len;
4838 }
4839 
4840 /**
4841  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
4842  *						from rx_status
4843  * @rx_status: Pointer to rx_status.
4844  * @rtap_buf: buffer to which radiotap has to be updated
4845  * @rtap_len: radiotap length
4846  *
 * API updates U-SIG (11be) fields in the radiotap header
4848  *
4849  * Return: length of rtap_len updated.
4850  */
4851 static unsigned int
4852 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
4853 				    int8_t *rtap_buf, uint32_t rtap_len)
4854 {
4855 	/*
4856 	 * IEEE80211_RADIOTAP_USIG:
4857 	 *		u32, u32, u32
4858 	 */
4859 	rtap_len = qdf_align(rtap_len, 4);
4860 
4861 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
4862 	rtap_len += 4;
4863 
4864 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
4865 	rtap_len += 4;
4866 
4867 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
4868 	rtap_len += 4;
4869 
4870 	qdf_rl_debug("U-SIG data %x %x %x",
4871 		     rx_status->usig_common, rx_status->usig_value,
4872 		     rx_status->usig_mask);
4873 
4874 	return rtap_len;
4875 }
4876 
4877 /**
4878  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
4879  *					from rx_status
4880  * @rx_status: Pointer to rx_status.
4881  * @rtap_buf: buffer to which radiotap has to be updated
4882  * @rtap_len: radiotap length
4883  *
 * API updates Extremely High Throughput (11be) fields in the radiotap header
4885  *
4886  * Return: length of rtap_len updated.
4887  */
4888 static unsigned int
4889 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
4890 				   int8_t *rtap_buf, uint32_t rtap_len)
4891 {
4892 	uint32_t user;
4893 
4894 	/*
4895 	 * IEEE80211_RADIOTAP_EHT:
4896 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
4897 	 */
4898 	rtap_len = qdf_align(rtap_len, 4);
4899 
4900 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
4901 	rtap_len += 4;
4902 
4903 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
4904 	rtap_len += 4;
4905 
4906 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
4907 	rtap_len += 4;
4908 
4909 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
4910 	rtap_len += 4;
4911 
4912 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
4913 	rtap_len += 4;
4914 
4915 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
4916 	rtap_len += 4;
4917 
4918 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
4919 	rtap_len += 4;
4920 
4921 	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
4922 		put_unaligned_le32(rx_status->eht_user_info[user],
4923 				   &rtap_buf[rtap_len]);
4924 		rtap_len += 4;
4925 	}
4926 
4927 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
4928 		     rx_status->eht_known, rx_status->eht_data[0],
4929 		     rx_status->eht_data[1], rx_status->eht_data[2],
4930 		     rx_status->eht_data[3], rx_status->eht_data[4],
4931 		     rx_status->eht_data[5]);
4932 
4933 	return rtap_len;
4934 }
4935 
4936 #define IEEE80211_RADIOTAP_TX_STATUS 0
4937 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
4938 #define IEEE80211_RADIOTAP_EXTENSION2 2
4939 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4940 
4941 /**
4942  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4943  * @rx_status: Pointer to rx_status.
4944  * @rtap_buf: Buf to which AMPDU info has to be updated.
4945  * @rtap_len: Current length of radiotap buffer
4946  *
4947  * Return: Length of radiotap after AMPDU flags updated.
4948  */
4949 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4950 					struct mon_rx_status *rx_status,
4951 					uint8_t *rtap_buf,
4952 					uint32_t rtap_len)
4953 {
4954 	/*
4955 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4956 	 * First 32 bits of AMPDU represents the reference number
4957 	 */
4958 
4959 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4960 	uint16_t ampdu_flags = 0;
4961 	uint16_t ampdu_reserved_flags = 0;
4962 
4963 	rtap_len = qdf_align(rtap_len, 4);
4964 
4965 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4966 	rtap_len += 4;
4967 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4968 	rtap_len += 2;
4969 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4970 	rtap_len += 2;
4971 
4972 	return rtap_len;
4973 }
4974 
4975 #ifdef DP_MON_RSSI_IN_DBM
4976 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
4977 (rx_status->rssi_comb)
4978 #else
4979 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
4980 (rx_status->rssi_comb + rx_status->chan_noise_floor)
4981 #endif
4982 
4983 /**
4984  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
4985  * @rx_status: Pointer to rx_status.
4986  * @rtap_buf: Buf to which tx info has to be updated.
4987  * @rtap_len: Current length of radiotap buffer
4988  *
4989  * Return: Length of radiotap after tx flags updated.
4990  */
4991 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
4992 						struct mon_rx_status *rx_status,
4993 						uint8_t *rtap_buf,
4994 						uint32_t rtap_len)
4995 {
4996 	/*
4997 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
4998 	 */
4999 
5000 	uint16_t tx_flags = 0;
5001 
5002 	rtap_len = qdf_align(rtap_len, 2);
5003 
5004 	switch (rx_status->tx_status) {
5005 	case RADIOTAP_TX_STATUS_FAIL:
5006 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5007 		break;
5008 	case RADIOTAP_TX_STATUS_NOACK:
5009 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5010 		break;
5011 	}
5012 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5013 	rtap_len += 2;
5014 
5015 	return rtap_len;
5016 }
5017 
5018 /**
5019  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
5020  * @rx_status: Pointer to rx_status.
5021  * @nbuf:      nbuf pointer to which radiotap has to be updated
5022  * @headroom_sz: Available headroom size.
5023  *
5024  * Return: length of rtap_len updated.
5025  */
5026 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5027 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5028 {
5029 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5030 	struct ieee80211_radiotap_header *rthdr =
5031 		(struct ieee80211_radiotap_header *)rtap_buf;
5032 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5033 	uint32_t rtap_len = rtap_hdr_len;
5034 	uint8_t length = rtap_len;
5035 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5036 	struct qdf_radiotap_ext2 *rtap_ext2;
5037 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5038 
5039 	/* per user info */
5040 	qdf_le32_t *it_present;
5041 	uint32_t it_present_val;
5042 	bool radiotap_ext1_hdr_present = false;
5043 
5044 	it_present = &rthdr->it_present;
5045 
5046 	/* Adding Extended Header space */
5047 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5048 	    rx_status->usig_flags || rx_status->eht_flags) {
5049 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5050 		rtap_len = rtap_hdr_len;
5051 		radiotap_ext1_hdr_present = true;
5052 	}
5053 
5054 	length = rtap_len;
5055 
5056 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5057 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5058 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5059 	rtap_len += 8;
5060 
5061 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5062 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5063 
5064 	if (rx_status->rs_fcs_err)
5065 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5066 
5067 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5068 	rtap_len += 1;
5069 
5070 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5071 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5072 	    !rx_status->he_flags) {
5073 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5074 		rtap_buf[rtap_len] = rx_status->rate;
5075 	} else
5076 		rtap_buf[rtap_len] = 0;
5077 	rtap_len += 1;
5078 
5079 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5080 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5081 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5082 	rtap_len += 2;
5083 	/* Channel flags. */
5084 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5085 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5086 	else
5087 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5088 	if (rx_status->cck_flag)
5089 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5090 	if (rx_status->ofdm_flag)
5091 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5092 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5093 	rtap_len += 2;
5094 
5095 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5096 	 *					(dBm)
5097 	 */
5098 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5099 	/*
	 * rssi_comb is in dB, need to convert it to dBm,
	 * normalized to a noise floor of -96 dBm
5102 	 */
5103 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5104 	rtap_len += 1;
5105 
5106 	/* RX signal noise floor */
5107 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5108 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5109 	rtap_len += 1;
5110 
5111 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5112 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5113 	rtap_buf[rtap_len] = rx_status->nr_ant;
5114 	rtap_len += 1;
5115 
5116 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5117 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5118 		return 0;
5119 	}
5120 
5121 	/* update tx flags for pkt capture*/
5122 	if (rx_status->add_rtap_ext) {
5123 		rthdr->it_present |=
5124 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5125 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5126 							     rtap_buf,
5127 							     rtap_len);
5128 
5129 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5130 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5131 			return 0;
5132 		}
5133 	}
5134 
5135 	if (rx_status->ht_flags) {
5136 		length = rtap_len;
		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
5138 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5139 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5140 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5141 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5142 		rtap_len += 1;
5143 
5144 		if (rx_status->sgi)
5145 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5146 		if (rx_status->bw)
5147 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5148 		else
5149 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5150 		rtap_len += 1;
5151 
5152 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5153 		rtap_len += 1;
5154 
5155 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5156 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5157 			return 0;
5158 		}
5159 	}
5160 
5161 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5162 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5163 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5164 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5165 								rtap_buf,
5166 								rtap_len);
5167 	}
5168 
5169 	if (rx_status->vht_flags) {
5170 		length = rtap_len;
5171 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5172 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5173 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5174 								rtap_buf,
5175 								rtap_len);
5176 
5177 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5178 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5179 			return 0;
5180 		}
5181 	}
5182 
5183 	if (rx_status->he_flags) {
5184 		length = rtap_len;
5185 		/* IEEE80211_RADIOTAP_HE */
5186 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5187 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5188 								rtap_buf,
5189 								rtap_len);
5190 
5191 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5192 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5193 			return 0;
5194 		}
5195 	}
5196 
5197 	if (rx_status->he_mu_flags) {
5198 		length = rtap_len;
5199 		/* IEEE80211_RADIOTAP_HE-MU */
5200 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5201 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5202 								rtap_buf,
5203 								rtap_len);
5204 
5205 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5206 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5207 			return 0;
5208 		}
5209 	}
5210 
5211 	if (rx_status->he_mu_other_flags) {
5212 		length = rtap_len;
5213 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5214 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5215 		rtap_len =
5216 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5217 								rtap_buf,
5218 								rtap_len);
5219 
5220 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5221 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5222 			return 0;
5223 		}
5224 	}
5225 
5226 	rtap_len = qdf_align(rtap_len, 2);
5227 	/*
5228 	 * Radiotap Vendor Namespace
5229 	 */
5230 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5231 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5232 					(rtap_buf + rtap_len);
5233 	/*
5234 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5235 	 */
5236 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5237 	/*
5238 	 * Name space selector = 0
5239 	 * We only will have one namespace for now
5240 	 */
5241 	radiotap_vendor_ns_ath->hdr.selector = 0;
5242 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5243 					sizeof(*radiotap_vendor_ns_ath) -
5244 					sizeof(radiotap_vendor_ns_ath->hdr));
5245 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5246 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5247 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5248 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5249 				cpu_to_le32(rx_status->ppdu_timestamp);
5250 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5251 
5252 	/* Move to next it_present */
5253 	if (radiotap_ext1_hdr_present) {
5254 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5255 		put_unaligned_le32(it_present_val, it_present);
5256 		it_present_val = 0;
5257 		it_present++;
5258 	}
5259 
5260 	/* Add Extension to Radiotap Header & corresponding data */
5261 	if (rx_status->add_rtap_ext) {
5262 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5263 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5264 
5265 		rtap_buf[rtap_len] = rx_status->tx_status;
5266 		rtap_len += 1;
5267 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5268 		rtap_len += 1;
5269 	}
5270 
5271 	/* Add Extension2 to Radiotap Header */
5272 	if (rx_status->add_rtap_ext2) {
5273 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5274 
5275 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5276 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5277 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5278 		if (!rx_user_status) {
5279 			rtap_ext2->tid = rx_status->tid;
5280 			rtap_ext2->start_seq = rx_status->start_seq;
5281 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5282 				     rx_status->ba_bitmap,
5283 				     8 * (sizeof(uint32_t)));
5284 		} else {
5285 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5286 
5287 			/* set default bitmap sz if not set */
5288 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5289 			rtap_ext2->tid = rx_user_status->tid;
5290 			rtap_ext2->start_seq = rx_user_status->start_seq;
5291 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5292 				     rx_user_status->ba_bitmap,
5293 				     ba_bitmap_sz * (sizeof(uint32_t)));
5294 		}
5295 
5296 		rtap_len += sizeof(*rtap_ext2);
5297 	}
5298 
5299 	if (rx_status->usig_flags) {
5300 		length = rtap_len;
5301 		/* IEEE80211_RADIOTAP_USIG */
5302 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5303 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5304 							       rtap_buf,
5305 							       rtap_len);
5306 
5307 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5308 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5309 			return 0;
5310 		}
5311 	}
5312 
5313 	if (rx_status->eht_flags) {
5314 		length = rtap_len;
5315 		/* IEEE80211_RADIOTAP_EHT */
5316 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5317 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5318 							      rtap_buf,
5319 							      rtap_len);
5320 
5321 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5322 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5323 			return 0;
5324 		}
5325 	}
5326 
5327 	put_unaligned_le32(it_present_val, it_present);
5328 	rthdr->it_len = cpu_to_le16(rtap_len);
5329 
5330 	if (headroom_sz < rtap_len) {
5331 		qdf_debug("DEBUG: Not enough space to update radiotap");
5332 		return 0;
5333 	}
5334 
5335 	qdf_nbuf_push_head(nbuf, rtap_len);
5336 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5337 	return rtap_len;
5338 }
5339 #else
5340 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5341 					struct mon_rx_status *rx_status,
5342 					int8_t *rtap_buf,
5343 					uint32_t rtap_len)
5344 {
5345 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5346 	return 0;
5347 }
5348 
5349 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5350 				      int8_t *rtap_buf, uint32_t rtap_len)
5351 {
5352 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5353 	return 0;
5354 }
5355 
5356 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5357 					struct mon_rx_status *rx_status,
5358 					uint8_t *rtap_buf,
5359 					uint32_t rtap_len)
5360 {
5361 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5362 	return 0;
5363 }
5364 
5365 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5366 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5367 {
5368 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5369 	return 0;
5370 }
5371 #endif
5372 qdf_export_symbol(qdf_nbuf_update_radiotap);
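
/*
 * Usage sketch (illustrative only, not part of the driver): a
 * monitor-mode RX path fills a struct mon_rx_status from the PPDU TLVs
 * and then prepends the radiotap header, provided enough headroom is
 * available; the function returns 0 when the headroom or the fixed
 * header budget is exceeded. deliver_to_monitor_interface() below is a
 * hypothetical consumer.
 *
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *					    qdf_nbuf_headroom(nbuf));
 *	if (!rtap_len)
 *		qdf_nbuf_free(nbuf);
 *	else
 *		deliver_to_monitor_interface(nbuf);
 */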
5373 
5374 /**
5375  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
5376  * @cb_func_ptr: function pointer to the nbuf free callback
5377  *
5378  * This function registers a callback function for nbuf free.
5379  *
5380  * Return: none
5381  */
5382 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5383 {
5384 	nbuf_free_cb = cb_func_ptr;
5385 }
5386 
5387 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
5388 
5389 /**
5390  * qdf_nbuf_classify_pkt() - classify packet
5391  * @skb - sk buff
5392  *
5393  * Return: none
5394  */
5395 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5396 {
5397 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5398 
5399 	/* check destination mac address is broadcast/multicast */
5400 	if (is_broadcast_ether_addr((uint8_t *)eh))
5401 		QDF_NBUF_CB_SET_BCAST(skb);
5402 	else if (is_multicast_ether_addr((uint8_t *)eh))
5403 		QDF_NBUF_CB_SET_MCAST(skb);
5404 
5405 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5406 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5407 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5408 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5409 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5410 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5411 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5412 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5413 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5414 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5415 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5416 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5417 }
5418 qdf_export_symbol(qdf_nbuf_classify_pkt);
5419 
5420 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5421 {
5422 	qdf_nbuf_users_set(&nbuf->users, 1);
5423 	nbuf->data = nbuf->head + NET_SKB_PAD;
5424 	skb_reset_tail_pointer(nbuf);
5425 }
5426 qdf_export_symbol(__qdf_nbuf_init);
5427 
5428 #ifdef WLAN_FEATURE_FASTPATH
5429 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5430 {
5431 	qdf_nbuf_users_set(&nbuf->users, 1);
5432 	nbuf->data = nbuf->head + NET_SKB_PAD;
5433 	skb_reset_tail_pointer(nbuf);
5434 }
5435 qdf_export_symbol(qdf_nbuf_init_fast);
5436 #endif /* WLAN_FEATURE_FASTPATH */
5437 
5438 
5439 #ifdef QDF_NBUF_GLOBAL_COUNT
5440 /**
 * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
 *
 * Return: void
5444  */
5445 void __qdf_nbuf_mod_init(void)
5446 {
5447 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5448 	qdf_atomic_init(&nbuf_count);
5449 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5450 }
5451 
5452 /**
 * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
 *
 * Return: void
5456  */
5457 void __qdf_nbuf_mod_exit(void)
5458 {
5459 }
5460 #endif
5461 
5462 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5463 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5464 					    int offset)
5465 {
5466 	unsigned int frag_offset;
5467 	skb_frag_t *frag;
5468 
5469 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5470 		return QDF_STATUS_E_FAILURE;
5471 
5472 	frag = &skb_shinfo(nbuf)->frags[idx];
5473 	frag_offset = skb_frag_off(frag);
5474 
5475 	frag_offset += offset;
5476 	skb_frag_off_set(frag, frag_offset);
5477 
5478 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5479 
5480 	return QDF_STATUS_SUCCESS;
5481 }
5482 
5483 #else
5484 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5485 					    int offset)
5486 {
5487 	uint16_t frag_offset;
5488 	skb_frag_t *frag;
5489 
5490 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5491 		return QDF_STATUS_E_FAILURE;
5492 
5493 	frag = &skb_shinfo(nbuf)->frags[idx];
5494 	frag_offset = frag->page_offset;
5495 
5496 	frag_offset += offset;
5497 	frag->page_offset = frag_offset;
5498 
5499 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5500 
5501 	return QDF_STATUS_SUCCESS;
5502 }
5503 #endif
5504 
5505 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
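
/*
 * Usage sketch (illustrative only): moving frag 0 of an nbuf forward by
 * 16 bytes effectively pulls 16 bytes off the front of that fragment,
 * since the page offset grows and the fragment size shrinks by the same
 * amount.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, 16) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */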
5506 
5507 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5508 			    int offset, int frag_len,
5509 			    unsigned int truesize, bool take_frag_ref)
5510 {
5511 	struct page *page;
5512 	int frag_offset;
5513 	uint8_t nr_frag;
5514 
5515 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5516 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5517 
5518 	page = virt_to_head_page(buf);
5519 	frag_offset = buf - page_address(page);
5520 
5521 	skb_add_rx_frag(nbuf, nr_frag, page,
5522 			(frag_offset + offset),
5523 			frag_len, truesize);
5524 
5525 	if (unlikely(take_frag_ref)) {
5526 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5527 		skb_frag_ref(nbuf, nr_frag);
5528 	}
5529 }
5530 
5531 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
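
/*
 * Usage sketch (illustrative only): attaching a received buffer
 * fragment (e.g. from a pre-allocated RX pool) to a parent nbuf without
 * taking an extra page reference. buf points into the fragment payload;
 * payload_len and rx_buf_size are hypothetical caller-known values.
 *
 *	__qdf_nbuf_add_rx_frag(buf, parent_nbuf, 0, payload_len,
 *			       rx_buf_size, false);
 */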
5532 
5533 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
5534 {
5535 	struct page *page;
5536 	skb_frag_t frag = {0};
5537 
5538 	page = virt_to_head_page(buf);
5539 	__skb_frag_set_page(&frag, page);
5540 
5541 	/*
5542 	 * since __skb_frag_ref() just use page to increase ref
5543 	 * we just decode page alone
5544 	 */
5545 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5546 	__skb_frag_ref(&frag);
5547 }
5548 
5549 qdf_export_symbol(__qdf_nbuf_ref_frag);
5550 
5551 #ifdef NBUF_FRAG_MEMORY_DEBUG
5552 
5553 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5554 						int offset, const char *func,
5555 						uint32_t line)
5556 {
5557 	QDF_STATUS result;
5558 	qdf_frag_t p_fragp, n_fragp;
5559 
5560 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5561 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5562 
5563 	if (qdf_likely(is_initial_mem_debug_disabled))
5564 		return result;
5565 
5566 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5567 
5568 	/*
5569 	 * Update frag address in frag debug tracker
5570 	 * when frag offset is successfully changed in skb
5571 	 */
5572 	if (result == QDF_STATUS_SUCCESS)
5573 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
5574 
5575 	return result;
5576 }
5577 
5578 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
5579 
5580 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
5581 				int offset, int frag_len,
5582 				unsigned int truesize, bool take_frag_ref,
5583 				const char *func, uint32_t line)
5584 {
5585 	qdf_frag_t fragp;
5586 	uint32_t num_nr_frags;
5587 
5588 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
5589 			       frag_len, truesize, take_frag_ref);
5590 
5591 	if (qdf_likely(is_initial_mem_debug_disabled))
5592 		return;
5593 
5594 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
5595 
5596 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5597 
5598 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
5599 
5600 	/* Update frag address in frag debug tracking table */
5601 	if (fragp != buf)
5602 		qdf_frag_debug_update_addr(buf, fragp, func, line);
5603 
5604 	/* Update frag refcount in frag debug tracking table */
5605 	qdf_frag_debug_refcount_inc(fragp, func, line);
5606 }
5607 
5608 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
5609 
5610 /**
5611  * qdf_nbuf_ref_frag_debug() - get frag reference
5612  * @buf: Frag pointer on which the reference is to be taken
 * @func: Caller function name
 * @line: Caller line number
5613  *
5614  * Return: void
5615  */
5616 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
5617 {
5618 	__qdf_nbuf_ref_frag(buf);
5619 
5620 	if (qdf_likely(is_initial_mem_debug_disabled))
5621 		return;
5622 
5623 	/* Update frag refcount in frag debug tracking table */
5624 	qdf_frag_debug_refcount_inc(buf, func, line);
5625 }
5626 
5627 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
5628 
5629 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
5630 				    uint32_t line)
5631 {
5632 	uint32_t num_nr_frags;
5633 	uint32_t idx = 0;
5634 	qdf_nbuf_t ext_list;
5635 	qdf_frag_t p_frag;
5636 
5637 	if (qdf_likely(is_initial_mem_debug_disabled))
5638 		return;
5639 
5640 	if (qdf_unlikely(!buf))
5641 		return;
5642 
5643 	/* Take care to update the refcount in the debug entries for frags */
5644 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5645 
5646 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5647 
5648 	while (idx < num_nr_frags) {
5649 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5650 		if (qdf_likely(p_frag))
5651 			qdf_frag_debug_refcount_inc(p_frag, func, line);
5652 		idx++;
5653 	}
5654 
5655 	/*
5656 	 * Take care to update the refcount in the debug entries for the
5657 	 * frags attached to frag_list
5658 	 */
5659 	ext_list = qdf_nbuf_get_ext_list(buf);
5660 	while (ext_list) {
5661 		idx = 0;
5662 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5663 
5664 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5665 
5666 		while (idx < num_nr_frags) {
5667 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5668 			if (qdf_likely(p_frag))
5669 				qdf_frag_debug_refcount_inc(p_frag, func, line);
5670 			idx++;
5671 		}
5672 		ext_list = qdf_nbuf_queue_next(ext_list);
5673 	}
5674 }
5675 
5676 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
5677 
5678 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
5679 				    uint32_t line)
5680 {
5681 	uint32_t num_nr_frags;
5682 	qdf_nbuf_t ext_list;
5683 	uint32_t idx = 0;
5684 	qdf_frag_t p_frag;
5685 
5686 	if (qdf_likely(is_initial_mem_debug_disabled))
5687 		return;
5688 
5689 	if (qdf_unlikely(!buf))
5690 		return;
5691 
5692 	/*
5693 	 * Decrement the refcount for frag debug nodes only when the last
5694 	 * user of the nbuf calls this API, so as to avoid decrementing the
5695 	 * refcount on every call except the last one when the nbuf has
5696 	 * multiple users.
5697 	 */
5698 	if (qdf_nbuf_get_users(buf) > 1)
5699 		return;
5700 
5701 	/* Take care to update the refcount in the debug entries for frags */
5702 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5703 
5704 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5705 
5706 	while (idx < num_nr_frags) {
5707 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5708 		if (qdf_likely(p_frag))
5709 			qdf_frag_debug_refcount_dec(p_frag, func, line);
5710 		idx++;
5711 	}
5712 
5713 	/* Take care to update debug entries for frags attached to frag_list */
5714 	ext_list = qdf_nbuf_get_ext_list(buf);
5715 	while (ext_list) {
5716 		if (qdf_nbuf_get_users(ext_list) == 1) {
5717 			idx = 0;
5718 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5719 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5720 			while (idx < num_nr_frags) {
5721 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5722 				if (qdf_likely(p_frag))
5723 					qdf_frag_debug_refcount_dec(p_frag,
5724 								    func, line);
5725 				idx++;
5726 			}
5727 		}
5728 		ext_list = qdf_nbuf_queue_next(ext_list);
5729 	}
5730 }
5731 
5732 qdf_export_symbol(qdf_net_buf_debug_release_frag);
5733 #endif /* NBUF_FRAG_MEMORY_DEBUG */
5734 
5735 /**
5736  * qdf_get_nbuf_valid_frag() - Get nbuf to store frag
5737  * @nbuf: qdf_nbuf_t master nbuf
5738  *
5739  * Return: qdf_nbuf_t
5740  */
5741 static inline qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
5742 {
5743 	qdf_nbuf_t last_nbuf;
5744 	uint32_t num_frags;
5745 
5746 	if (qdf_unlikely(!nbuf))
5747 		return NULL;
5748 
5749 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
5750 
5751 	/* Check whether the nbuf frag array has room for another frag */
5752 	if (num_frags < QDF_NBUF_MAX_FRAGS)
5753 		return nbuf;
5754 
5755 	if (!__qdf_nbuf_has_fraglist(nbuf))
5756 		return NULL;
5757 
5758 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
5759 	if (qdf_unlikely(!last_nbuf))
5760 		return NULL;
5761 
5762 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
5763 	if (num_frags < QDF_NBUF_MAX_FRAGS)
5764 		return last_nbuf;
5765 
5766 	return NULL;
5767 }
5768 
5769 /**
5770  * qdf_nbuf_add_frag_debug() - Add frag to nbuf
5771  * @osdev: Device handle
5772  * @buf: Frag pointer to be added to the nbuf frags
5773  * @nbuf: qdf_nbuf_t where frag will be added
5774  * @offset: Offset within the frag to be added to the nbuf frags
5775  * @frag_len: Frag length
5776  * @truesize: truesize
5777  * @take_frag_ref: Whether to take ref for frag or not
5778  *      This bool must be set as per the below condition:
5779  *      1. False: if this frag is being added to an nbuf
5780  *              for the first time after allocation
5781  *      2. True: if the frag is already attached as part
5782  *              of some other nbuf
5783  * @minsize: Minimum size to allocate
5784  * @func: Caller function name
5785  * @line: Caller function line no.
5786  *
5787  * If the number of frags exceeds the maximum frag array size, a new nbuf
5788  * is allocated with minimum headroom and the frag is added to that nbuf.
5789  * The new nbuf is added as a frag_list to the master nbuf.
5790  *
5791  * Return: QDF_STATUS
5792  */
5793 QDF_STATUS
5794 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
5795 			qdf_nbuf_t nbuf, int offset,
5796 			int frag_len, unsigned int truesize,
5797 			bool take_frag_ref, unsigned int minsize,
5798 			const char *func, uint32_t line)
5799 {
5800 	qdf_nbuf_t cur_nbuf;
5801 	qdf_nbuf_t this_nbuf;
5802 
5803 	cur_nbuf = nbuf;
5804 	this_nbuf = nbuf;
5805 
5806 	if (qdf_unlikely(!frag_len || !buf)) {
5807 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
5808 			     func, line,
5809 			     buf, frag_len);
5810 		return QDF_STATUS_E_INVAL;
5811 	}
5812 
5813 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
5814 
5815 	if (this_nbuf) {
5816 		cur_nbuf = this_nbuf;
5817 	} else {
5818 		/* allocate a dummy mpdu buffer with minsize headroom */
5819 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
5820 		if (qdf_unlikely(!this_nbuf)) {
5821 			qdf_nofl_err("%s : %d no memory to allocate\n",
5822 				     func, line);
5823 			return QDF_STATUS_E_NOMEM;
5824 		}
5825 	}
5826 
5827 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
5828 			     take_frag_ref);
5829 
5830 	if (this_nbuf != cur_nbuf) {
5831 		/* add new skb to frag list */
5832 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
5833 					 qdf_nbuf_len(this_nbuf));
5834 	}
5835 
5836 	return QDF_STATUS_SUCCESS;
5837 }
5838 
5839 qdf_export_symbol(qdf_nbuf_add_frag_debug);
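
/*
 * Illustrative sketch, not part of the original file and kept under "#if 0":
 * sharing a frag that is already attached to one nbuf with a second nbuf
 * through the qdf_nbuf_add_frag() wrapper (assumed here to map onto
 * qdf_nbuf_add_frag_debug() above). take_frag_ref is true because the frag
 * is already part of another nbuf, per the condition documented above; if
 * the destination nbuf's frag array is full, the API chains a new nbuf on
 * the frag_list transparently.
 */
#if 0
static QDF_STATUS example_share_frag(qdf_device_t osdev, qdf_nbuf_t dst_nbuf,
				     qdf_nbuf_t src_nbuf, uint8_t idx,
				     int frag_len, unsigned int truesize,
				     unsigned int minsize)
{
	qdf_frag_t frag = qdf_nbuf_get_frag_addr(src_nbuf, idx);

	return qdf_nbuf_add_frag(osdev, frag, dst_nbuf, 0 /* offset */,
				 frag_len, truesize,
				 true /* frag already attached elsewhere */,
				 minsize);
}
#endif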
5840 
5841 #ifdef MEMORY_DEBUG
5842 void qdf_nbuf_acquire_track_lock(uint32_t index,
5843 				 unsigned long irq_flag)
5844 {
5845 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
5846 			  irq_flag);
5847 }
5848 
5849 void qdf_nbuf_release_track_lock(uint32_t index,
5850 				 unsigned long irq_flag)
5851 {
5852 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
5853 			       irq_flag);
5854 }
5855 
5856 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
5857 {
5858 	return gp_qdf_net_buf_track_tbl[index];
5859 }
5860 #endif /* MEMORY_DEBUG */
5861 
5862 #ifdef ENHANCED_OS_ABSTRACTION
5863 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
5864 {
5865 	__qdf_nbuf_set_timestamp(buf);
5866 }
5867 
5868 qdf_export_symbol(qdf_nbuf_set_timestamp);
5869 
5870 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
5871 {
5872 	return __qdf_nbuf_get_timestamp(buf);
5873 }
5874 
5875 qdf_export_symbol(qdf_nbuf_get_timestamp);
5876 
5877 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
5878 {
5879 	return __qdf_nbuf_get_timedelta_us(buf);
5880 }
5881 
5882 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
5883 
5884 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
5885 {
5886 	return __qdf_nbuf_get_timedelta_ms(buf);
5887 }
5888 
5889 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
5890 
5891 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
5892 {
5893 	return __qdf_nbuf_net_timedelta(t);
5894 }
5895 
5896 qdf_export_symbol(qdf_nbuf_net_timedelta);
5897 #endif
5898