xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework(QDF) network buffer management APIs
23  */
24 
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/version.h>
28 #include <linux/skbuff.h>
29 #include <linux/module.h>
30 #include <linux/proc_fs.h>
31 #include <linux/inetdevice.h>
32 #include <qdf_atomic.h>
33 #include <qdf_debugfs.h>
34 #include <qdf_lock.h>
35 #include <qdf_mem.h>
36 #include <qdf_module.h>
37 #include <qdf_nbuf.h>
38 #include <qdf_status.h>
39 #include "qdf_str.h"
40 #include <qdf_trace.h>
41 #include "qdf_tracker.h"
42 #include <qdf_types.h>
43 #include <net/ieee80211_radiotap.h>
44 #include <pld_common.h>
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* tx status */
78 #define RADIOTAP_TX_STATUS_FAIL		1
79 #define RADIOTAP_TX_STATUS_NOACK	2
80 
81 /* channel number to freq conversion */
82 #define CHANNEL_NUM_14 14
83 #define CHANNEL_NUM_15 15
84 #define CHANNEL_NUM_27 27
85 #define CHANNEL_NUM_35 35
86 #define CHANNEL_NUM_182 182
87 #define CHANNEL_NUM_197 197
88 #define CHANNEL_FREQ_2484 2484
89 #define CHANNEL_FREQ_2407 2407
90 #define CHANNEL_FREQ_2512 2512
91 #define CHANNEL_FREQ_5000 5000
92 #define CHANNEL_FREQ_4000 4000
93 #define CHANNEL_FREQ_5150 5150
94 #define FREQ_MULTIPLIER_CONST_5MHZ 5
95 #define FREQ_MULTIPLIER_CONST_20MHZ 20
96 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
97 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
98 #define RADIOTAP_CCK_CHANNEL 0x0020
99 #define RADIOTAP_OFDM_CHANNEL 0x0040
100 
101 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
102 #include <qdf_mc_timer.h>
103 
104 struct qdf_track_timer {
105 	qdf_mc_timer_t track_timer;
106 	qdf_atomic_t alloc_fail_cnt;
107 };
108 
109 static struct qdf_track_timer alloc_track_timer;
110 
111 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
112 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
113 #endif
114 
115 #ifdef NBUF_MEMORY_DEBUG
116 /* SMMU crash indication*/
117 static qdf_atomic_t smmu_crashed;
118 /* Number of nbuf not added to history*/
119 unsigned long g_histroy_add_drop;
120 #endif
121 
122 /* Packet Counter */
123 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
124 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
125 #ifdef QDF_NBUF_GLOBAL_COUNT
126 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
127 static qdf_atomic_t nbuf_count;
128 #endif
129 
130 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
131 static bool is_initial_mem_debug_disabled;
132 #endif
133 
134 /**
135  *  __qdf_nbuf_get_ip_offset - Get IPV4/V6 header offset
136  * @data: Pointer to network data buffer
137  *
138  * Get the IP header offset in case of 8021Q and 8021AD
139  * tag is present in L2 header.
140  *
141  * Return: IP header offset
142  */
143 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
144 {
145 	uint16_t ether_type;
146 
147 	ether_type = *(uint16_t *)(data +
148 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
149 
150 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
151 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
152 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
153 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
154 
155 	return QDF_NBUF_TRAC_IP_OFFSET;
156 }
157 
158 /**
159  *  __qdf_nbuf_get_ether_type - Get the ether type
160  * @data: Pointer to network data buffer
161  *
162  * Get the ether type in case of 8021Q and 8021AD tag
163  * is present in L2 header, e.g for the returned ether type
164  * value, if IPV4 data ether type 0x0800, return 0x0008.
165  *
166  * Return ether type.
167  */
168 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
169 {
170 	uint16_t ether_type;
171 
172 	ether_type = *(uint16_t *)(data +
173 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
174 
175 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
176 		ether_type = *(uint16_t *)(data +
177 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
178 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
179 		ether_type = *(uint16_t *)(data +
180 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
181 
182 	return ether_type;
183 }
184 
185 /**
186  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
187  *
188  * Return: none
189  */
190 void qdf_nbuf_tx_desc_count_display(void)
191 {
192 	qdf_debug("Current Snapshot of the Driver:");
193 	qdf_debug("Data Packets:");
194 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
195 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
196 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
198 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
205 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
207 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
211 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
212 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
213 	qdf_debug("Mgmt Packets:");
214 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
215 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
216 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
227 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
228 }
229 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
230 
231 /**
232  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
233  * @packet_type   : packet type either mgmt/data
234  * @current_state : layer at which the packet currently present
235  *
236  * Return: none
237  */
238 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
239 			uint8_t current_state)
240 {
241 	switch (packet_type) {
242 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
243 		nbuf_tx_mgmt[current_state]++;
244 		break;
245 	case QDF_NBUF_TX_PKT_DATA_TRACK:
246 		nbuf_tx_data[current_state]++;
247 		break;
248 	default:
249 		break;
250 	}
251 }
252 
253 /**
254  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
255  *
256  * Return: none
257  */
258 void qdf_nbuf_tx_desc_count_clear(void)
259 {
260 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
261 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
262 }
263 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
264 
265 /**
266  * qdf_nbuf_set_state() - Updates the packet state
267  * @nbuf:            network buffer
268  * @current_state :  layer at which the packet currently is
269  *
270  * This function updates the packet state to the layer at which the packet
271  * currently is
272  *
273  * Return: none
274  */
275 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
276 {
277 	/*
278 	 * Only Mgmt, Data Packets are tracked. WMI messages
279 	 * such as scan commands are not tracked
280 	 */
281 	uint8_t packet_type;
282 
283 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
284 
285 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
286 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
287 		return;
288 	}
289 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
290 	qdf_nbuf_tx_desc_count_update(packet_type,
291 					current_state);
292 }
293 qdf_export_symbol(qdf_nbuf_set_state);
294 
295 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
296 /**
297  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
298  *
299  * This function starts the alloc fail replenish timer.
300  *
301  * Return: void
302  */
303 static inline void __qdf_nbuf_start_replenish_timer(void)
304 {
305 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
306 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
307 	    QDF_TIMER_STATE_RUNNING)
308 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
309 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
310 }
311 
312 /**
313  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
314  *
315  * This function stops the alloc fail replenish timer.
316  *
317  * Return: void
318  */
319 static inline void __qdf_nbuf_stop_replenish_timer(void)
320 {
321 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
322 		return;
323 
324 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
325 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
326 	    QDF_TIMER_STATE_RUNNING)
327 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
328 }
329 
330 /**
331  * qdf_replenish_expire_handler - Replenish expire handler
332  *
333  * This function triggers when the alloc fail replenish timer expires.
334  *
335  * Return: void
336  */
337 static void qdf_replenish_expire_handler(void *arg)
338 {
339 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
340 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
341 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
342 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
343 
344 		/* Error handling here */
345 	}
346 }
347 
348 /**
349  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
350  *
351  * This function initializes the nbuf alloc fail replenish timer.
352  *
353  * Return: void
354  */
355 void __qdf_nbuf_init_replenish_timer(void)
356 {
357 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
358 			  qdf_replenish_expire_handler, NULL);
359 }
360 
361 /**
362  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
363  *
364  * This function deinitializes the nbuf alloc fail replenish timer.
365  *
366  * Return: void
367  */
368 void __qdf_nbuf_deinit_replenish_timer(void)
369 {
370 	__qdf_nbuf_stop_replenish_timer();
371 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
372 }
373 
374 void qdf_nbuf_stop_replenish_timer(void)
375 {
376 	__qdf_nbuf_stop_replenish_timer();
377 }
378 #else
379 
380 static inline void __qdf_nbuf_start_replenish_timer(void) {}
381 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
382 void qdf_nbuf_stop_replenish_timer(void)
383 {
384 }
385 #endif
386 
387 /* globals do not need to be initialized to NULL/0 */
388 qdf_nbuf_trace_update_t qdf_trace_update_cb;
389 qdf_nbuf_free_t nbuf_free_cb;
390 
391 #ifdef QDF_NBUF_GLOBAL_COUNT
392 
393 /**
394  * __qdf_nbuf_count_get() - get nbuf global count
395  *
396  * Return: nbuf global count
397  */
398 int __qdf_nbuf_count_get(void)
399 {
400 	return qdf_atomic_read(&nbuf_count);
401 }
402 qdf_export_symbol(__qdf_nbuf_count_get);
403 
404 /**
405  * __qdf_nbuf_count_inc() - increment nbuf global count
406  *
407  * @buf: sk buff
408  *
409  * Return: void
410  */
411 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
412 {
413 	int num_nbuf = 1;
414 	qdf_nbuf_t ext_list;
415 
416 	if (qdf_likely(is_initial_mem_debug_disabled))
417 		return;
418 
419 	ext_list = qdf_nbuf_get_ext_list(nbuf);
420 
421 	/* Take care to account for frag_list */
422 	while (ext_list) {
423 		++num_nbuf;
424 		ext_list = qdf_nbuf_queue_next(ext_list);
425 	}
426 
427 	qdf_atomic_add(num_nbuf, &nbuf_count);
428 }
429 qdf_export_symbol(__qdf_nbuf_count_inc);
430 
431 /**
432  * __qdf_nbuf_count_dec() - decrement nbuf global count
433  *
434  * @buf: sk buff
435  *
436  * Return: void
437  */
438 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
439 {
440 	qdf_nbuf_t ext_list;
441 	int num_nbuf;
442 
443 	if (qdf_likely(is_initial_mem_debug_disabled))
444 		return;
445 
446 	if (qdf_nbuf_get_users(nbuf) > 1)
447 		return;
448 
449 	num_nbuf = 1;
450 
451 	/* Take care to account for frag_list */
452 	ext_list = qdf_nbuf_get_ext_list(nbuf);
453 	while (ext_list) {
454 		if (qdf_nbuf_get_users(ext_list) == 1)
455 			++num_nbuf;
456 		ext_list = qdf_nbuf_queue_next(ext_list);
457 	}
458 
459 	qdf_atomic_sub(num_nbuf, &nbuf_count);
460 }
461 qdf_export_symbol(__qdf_nbuf_count_dec);
462 #endif
463 
464 #ifdef NBUF_FRAG_MEMORY_DEBUG
465 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
466 {
467 	qdf_nbuf_t ext_list;
468 	uint32_t num_nr_frags;
469 	uint32_t total_num_nr_frags;
470 
471 	if (qdf_likely(is_initial_mem_debug_disabled))
472 		return;
473 
474 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
475 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
476 
477 	total_num_nr_frags = num_nr_frags;
478 
479 	/* Take into account the frags attached to frag_list */
480 	ext_list = qdf_nbuf_get_ext_list(nbuf);
481 	while (ext_list) {
482 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
483 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
484 		total_num_nr_frags += num_nr_frags;
485 		ext_list = qdf_nbuf_queue_next(ext_list);
486 	}
487 
488 	qdf_frag_count_inc(total_num_nr_frags);
489 }
490 
491 qdf_export_symbol(qdf_nbuf_frag_count_inc);
492 
493 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
494 {
495 	qdf_nbuf_t ext_list;
496 	uint32_t num_nr_frags;
497 	uint32_t total_num_nr_frags;
498 
499 	if (qdf_likely(is_initial_mem_debug_disabled))
500 		return;
501 
502 	if (qdf_nbuf_get_users(nbuf) > 1)
503 		return;
504 
505 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
506 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
507 
508 	total_num_nr_frags = num_nr_frags;
509 
510 	/* Take into account the frags attached to frag_list */
511 	ext_list = qdf_nbuf_get_ext_list(nbuf);
512 	while (ext_list) {
513 		if (qdf_nbuf_get_users(ext_list) == 1) {
514 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
515 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
516 			total_num_nr_frags += num_nr_frags;
517 		}
518 		ext_list = qdf_nbuf_queue_next(ext_list);
519 	}
520 
521 	qdf_frag_count_dec(total_num_nr_frags);
522 }
523 
524 qdf_export_symbol(qdf_nbuf_frag_count_dec);
525 
526 #endif
527 
528 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
529 	!defined(QCA_WIFI_QCN9000)
530 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
531 				 int align, int prio, const char *func,
532 				 uint32_t line)
533 {
534 	struct sk_buff *skb;
535 	unsigned long offset;
536 	uint32_t lowmem_alloc_tries = 0;
537 
538 	if (align)
539 		size += (align - 1);
540 
541 realloc:
542 	skb = dev_alloc_skb(size);
543 
544 	if (skb)
545 		goto skb_alloc;
546 
547 	skb = pld_nbuf_pre_alloc(size);
548 
549 	if (!skb) {
550 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
551 				size, func, line);
552 		return NULL;
553 	}
554 
555 skb_alloc:
556 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
557 	 * Though we are trying to reserve low memory upfront to prevent this,
558 	 * we sometimes see SKBs allocated from low memory.
559 	 */
560 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
561 		lowmem_alloc_tries++;
562 		if (lowmem_alloc_tries > 100) {
563 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
564 				     size, func, line);
565 			return NULL;
566 		} else {
567 			/* Not freeing to make sure it
568 			 * will not get allocated again
569 			 */
570 			goto realloc;
571 		}
572 	}
573 	memset(skb->cb, 0x0, sizeof(skb->cb));
574 
575 	/*
576 	 * The default is for netbuf fragments to be interpreted
577 	 * as wordstreams rather than bytestreams.
578 	 */
579 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
580 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
581 
582 	/*
583 	 * XXX:how about we reserve first then align
584 	 * Align & make sure that the tail & data are adjusted properly
585 	 */
586 
587 	if (align) {
588 		offset = ((unsigned long)skb->data) % align;
589 		if (offset)
590 			skb_reserve(skb, align - offset);
591 	}
592 
593 	/*
594 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
595 	 * pointer
596 	 */
597 	skb_reserve(skb, reserve);
598 	qdf_nbuf_count_inc(skb);
599 
600 	return skb;
601 }
602 #else
603 
604 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
605 				 int align, int prio, const char *func,
606 				 uint32_t line)
607 {
608 	struct sk_buff *skb;
609 	unsigned long offset;
610 	int flags = GFP_KERNEL;
611 
612 	if (align)
613 		size += (align - 1);
614 
615 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
616 		flags = GFP_ATOMIC;
617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
618 		/*
619 		 * Observed that kcompactd burns out CPU to make order-3 page.
620 		 *__netdev_alloc_skb has 4k page fallback option just in case of
621 		 * failing high order page allocation so we don't need to be
622 		 * hard. Make kcompactd rest in piece.
623 		 */
624 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
625 #endif
626 	}
627 
628 	skb = __netdev_alloc_skb(NULL, size, flags);
629 
630 	if (skb)
631 		goto skb_alloc;
632 
633 	skb = pld_nbuf_pre_alloc(size);
634 
635 	if (!skb) {
636 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
637 				size, func, line);
638 		__qdf_nbuf_start_replenish_timer();
639 		return NULL;
640 	} else {
641 		__qdf_nbuf_stop_replenish_timer();
642 	}
643 
644 skb_alloc:
645 	memset(skb->cb, 0x0, sizeof(skb->cb));
646 
647 	/*
648 	 * The default is for netbuf fragments to be interpreted
649 	 * as wordstreams rather than bytestreams.
650 	 */
651 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
652 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
653 
654 	/*
655 	 * XXX:how about we reserve first then align
656 	 * Align & make sure that the tail & data are adjusted properly
657 	 */
658 
659 	if (align) {
660 		offset = ((unsigned long)skb->data) % align;
661 		if (offset)
662 			skb_reserve(skb, align - offset);
663 	}
664 
665 	/*
666 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
667 	 * pointer
668 	 */
669 	skb_reserve(skb, reserve);
670 	qdf_nbuf_count_inc(skb);
671 
672 	return skb;
673 }
674 #endif
675 qdf_export_symbol(__qdf_nbuf_alloc);
676 
677 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
678 					  const char *func, uint32_t line)
679 {
680 	qdf_nbuf_t nbuf;
681 	unsigned long offset;
682 
683 	if (align)
684 		size += (align - 1);
685 
686 	nbuf = alloc_skb(size, GFP_ATOMIC);
687 	if (!nbuf)
688 		goto ret_nbuf;
689 
690 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
691 
692 	skb_reserve(nbuf, reserve);
693 
694 	if (align) {
695 		offset = ((unsigned long)nbuf->data) % align;
696 		if (offset)
697 			skb_reserve(nbuf, align - offset);
698 	}
699 
700 	qdf_nbuf_count_inc(nbuf);
701 
702 ret_nbuf:
703 	return nbuf;
704 }
705 
706 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
707 
708 /**
709  * __qdf_nbuf_free() - free the nbuf its interrupt safe
710  * @skb: Pointer to network buffer
711  *
712  * Return: none
713  */
714 
715 void __qdf_nbuf_free(struct sk_buff *skb)
716 {
717 	if (pld_nbuf_pre_alloc_free(skb))
718 		return;
719 
720 	qdf_nbuf_frag_count_dec(skb);
721 
722 	qdf_nbuf_count_dec(skb);
723 	if (nbuf_free_cb)
724 		nbuf_free_cb(skb);
725 	else
726 		dev_kfree_skb_any(skb);
727 }
728 
729 qdf_export_symbol(__qdf_nbuf_free);
730 
731 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
732 {
733 	qdf_nbuf_t skb_new = NULL;
734 
735 	skb_new = skb_clone(skb, GFP_ATOMIC);
736 	if (skb_new) {
737 		qdf_nbuf_frag_count_inc(skb_new);
738 		qdf_nbuf_count_inc(skb_new);
739 	}
740 	return skb_new;
741 }
742 
743 qdf_export_symbol(__qdf_nbuf_clone);
744 
745 #ifdef NBUF_MEMORY_DEBUG
746 struct qdf_nbuf_event {
747 	qdf_nbuf_t nbuf;
748 	char func[QDF_MEM_FUNC_NAME_SIZE];
749 	uint32_t line;
750 	enum qdf_nbuf_event_type type;
751 	uint64_t timestamp;
752 	qdf_dma_addr_t iova;
753 };
754 
755 #ifndef QDF_NBUF_HISTORY_SIZE
756 #define QDF_NBUF_HISTORY_SIZE 4096
757 #endif
758 static qdf_atomic_t qdf_nbuf_history_index;
759 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
760 
761 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
762 {
763 	int32_t next = qdf_atomic_inc_return(index);
764 
765 	if (next == size)
766 		qdf_atomic_sub(size, index);
767 
768 	return next % size;
769 }
770 
771 void
772 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
773 		     enum qdf_nbuf_event_type type)
774 {
775 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
776 						   QDF_NBUF_HISTORY_SIZE);
777 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
778 
779 	if (qdf_atomic_read(&smmu_crashed)) {
780 		g_histroy_add_drop++;
781 		return;
782 	}
783 
784 	event->nbuf = nbuf;
785 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
786 	event->line = line;
787 	event->type = type;
788 	event->timestamp = qdf_get_log_timestamp();
789 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP)
790 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
791 	else
792 		event->iova = 0;
793 }
794 
795 void qdf_set_smmu_fault_state(bool smmu_fault_state)
796 {
797 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
798 	if (!smmu_fault_state)
799 		g_histroy_add_drop = 0;
800 }
801 qdf_export_symbol(qdf_set_smmu_fault_state);
802 #endif /* NBUF_MEMORY_DEBUG */
803 
804 #ifdef NBUF_MAP_UNMAP_DEBUG
805 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
806 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
807 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
808 
809 static void qdf_nbuf_map_tracking_init(void)
810 {
811 	qdf_tracker_init(&qdf_nbuf_map_tracker);
812 }
813 
814 static void qdf_nbuf_map_tracking_deinit(void)
815 {
816 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
817 }
818 
819 static QDF_STATUS
820 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
821 {
822 	if (is_initial_mem_debug_disabled)
823 		return QDF_STATUS_SUCCESS;
824 
825 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
826 }
827 
828 static void
829 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
830 {
831 	if (is_initial_mem_debug_disabled)
832 		return;
833 
834 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
835 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
836 }
837 
838 void qdf_nbuf_map_check_for_leaks(void)
839 {
840 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
841 }
842 
843 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
844 			      qdf_nbuf_t buf,
845 			      qdf_dma_dir_t dir,
846 			      const char *func,
847 			      uint32_t line)
848 {
849 	QDF_STATUS status;
850 
851 	status = qdf_nbuf_track_map(buf, func, line);
852 	if (QDF_IS_STATUS_ERROR(status))
853 		return status;
854 
855 	status = __qdf_nbuf_map(osdev, buf, dir);
856 	if (QDF_IS_STATUS_ERROR(status)) {
857 		qdf_nbuf_untrack_map(buf, func, line);
858 	} else {
859 		if (!is_initial_mem_debug_disabled)
860 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
861 		qdf_net_buf_debug_update_map_node(buf, func, line);
862 	}
863 
864 	return status;
865 }
866 
867 qdf_export_symbol(qdf_nbuf_map_debug);
868 
869 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
870 			  qdf_nbuf_t buf,
871 			  qdf_dma_dir_t dir,
872 			  const char *func,
873 			  uint32_t line)
874 {
875 	qdf_nbuf_untrack_map(buf, func, line);
876 	__qdf_nbuf_unmap_single(osdev, buf, dir);
877 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
878 }
879 
880 qdf_export_symbol(qdf_nbuf_unmap_debug);
881 
882 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
883 				     qdf_nbuf_t buf,
884 				     qdf_dma_dir_t dir,
885 				     const char *func,
886 				     uint32_t line)
887 {
888 	QDF_STATUS status;
889 
890 	status = qdf_nbuf_track_map(buf, func, line);
891 	if (QDF_IS_STATUS_ERROR(status))
892 		return status;
893 
894 	status = __qdf_nbuf_map_single(osdev, buf, dir);
895 	if (QDF_IS_STATUS_ERROR(status)) {
896 		qdf_nbuf_untrack_map(buf, func, line);
897 	} else {
898 		if (!is_initial_mem_debug_disabled)
899 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
900 		qdf_net_buf_debug_update_map_node(buf, func, line);
901 	}
902 
903 	return status;
904 }
905 
906 qdf_export_symbol(qdf_nbuf_map_single_debug);
907 
908 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
909 				 qdf_nbuf_t buf,
910 				 qdf_dma_dir_t dir,
911 				 const char *func,
912 				 uint32_t line)
913 {
914 	qdf_nbuf_untrack_map(buf, func, line);
915 	__qdf_nbuf_unmap_single(osdev, buf, dir);
916 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
917 }
918 
919 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
920 
921 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
922 				     qdf_nbuf_t buf,
923 				     qdf_dma_dir_t dir,
924 				     int nbytes,
925 				     const char *func,
926 				     uint32_t line)
927 {
928 	QDF_STATUS status;
929 
930 	status = qdf_nbuf_track_map(buf, func, line);
931 	if (QDF_IS_STATUS_ERROR(status))
932 		return status;
933 
934 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
935 	if (QDF_IS_STATUS_ERROR(status)) {
936 		qdf_nbuf_untrack_map(buf, func, line);
937 	} else {
938 		if (!is_initial_mem_debug_disabled)
939 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
940 		qdf_net_buf_debug_update_map_node(buf, func, line);
941 	}
942 
943 	return status;
944 }
945 
946 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
947 
948 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
949 				 qdf_nbuf_t buf,
950 				 qdf_dma_dir_t dir,
951 				 int nbytes,
952 				 const char *func,
953 				 uint32_t line)
954 {
955 	qdf_nbuf_untrack_map(buf, func, line);
956 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
957 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
958 }
959 
960 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
961 
962 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
963 					    qdf_nbuf_t buf,
964 					    qdf_dma_dir_t dir,
965 					    int nbytes,
966 					    const char *func,
967 					    uint32_t line)
968 {
969 	QDF_STATUS status;
970 
971 	status = qdf_nbuf_track_map(buf, func, line);
972 	if (QDF_IS_STATUS_ERROR(status))
973 		return status;
974 
975 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
976 	if (QDF_IS_STATUS_ERROR(status)) {
977 		qdf_nbuf_untrack_map(buf, func, line);
978 	} else {
979 		if (!is_initial_mem_debug_disabled)
980 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
981 		qdf_net_buf_debug_update_map_node(buf, func, line);
982 	}
983 
984 	return status;
985 }
986 
987 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
988 
989 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
990 					qdf_nbuf_t buf,
991 					qdf_dma_dir_t dir,
992 					int nbytes,
993 					const char *func,
994 					uint32_t line)
995 {
996 	qdf_nbuf_untrack_map(buf, func, line);
997 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
998 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
999 }
1000 
1001 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1002 
1003 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1004 					      qdf_nbuf_t buf,
1005 					      qdf_dma_addr_t phy_addr,
1006 					      qdf_dma_dir_t dir, int nbytes,
1007 					      const char *func, uint32_t line)
1008 {
1009 	qdf_nbuf_untrack_map(buf, func, line);
1010 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1011 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1012 }
1013 
1014 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1015 
1016 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1017 					     const char *func,
1018 					     uint32_t line)
1019 {
1020 	char map_func[QDF_TRACKER_FUNC_SIZE];
1021 	uint32_t map_line;
1022 
1023 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1024 				&map_func, &map_line))
1025 		return;
1026 
1027 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1028 			   func, line, map_func, map_line);
1029 }
1030 #else
1031 static inline void qdf_nbuf_map_tracking_init(void)
1032 {
1033 }
1034 
1035 static inline void qdf_nbuf_map_tracking_deinit(void)
1036 {
1037 }
1038 
1039 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1040 						    const char *func,
1041 						    uint32_t line)
1042 {
1043 }
1044 #endif /* NBUF_MAP_UNMAP_DEBUG */
1045 
1046 /**
1047  * __qdf_nbuf_map() - map a buffer to local bus address space
1048  * @osdev: OS device
1049  * @bmap: Bitmap
1050  * @skb: Pointer to network buffer
1051  * @dir: Direction
1052  *
1053  * Return: QDF_STATUS
1054  */
1055 #ifdef QDF_OS_DEBUG
1056 QDF_STATUS
1057 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1058 {
1059 	struct skb_shared_info *sh = skb_shinfo(skb);
1060 
1061 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1062 			|| (dir == QDF_DMA_FROM_DEVICE));
1063 
1064 	/*
1065 	 * Assume there's only a single fragment.
1066 	 * To support multiple fragments, it would be necessary to change
1067 	 * qdf_nbuf_t to be a separate object that stores meta-info
1068 	 * (including the bus address for each fragment) and a pointer
1069 	 * to the underlying sk_buff.
1070 	 */
1071 	qdf_assert(sh->nr_frags == 0);
1072 
1073 	return __qdf_nbuf_map_single(osdev, skb, dir);
1074 }
1075 qdf_export_symbol(__qdf_nbuf_map);
1076 
1077 #else
1078 QDF_STATUS
1079 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1080 {
1081 	return __qdf_nbuf_map_single(osdev, skb, dir);
1082 }
1083 qdf_export_symbol(__qdf_nbuf_map);
1084 #endif
1085 /**
1086  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1087  * @osdev: OS device
1088  * @skb: Pointer to network buffer
1089  * @dir: dma direction
1090  *
1091  * Return: none
1092  */
1093 void
1094 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1095 			qdf_dma_dir_t dir)
1096 {
1097 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1098 		   || (dir == QDF_DMA_FROM_DEVICE));
1099 
1100 	/*
1101 	 * Assume there's a single fragment.
1102 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1103 	 */
1104 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1105 }
1106 qdf_export_symbol(__qdf_nbuf_unmap);
1107 
1108 /**
1109  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1110  * @osdev: OS device
1111  * @skb: Pointer to network buffer
1112  * @dir: Direction
1113  *
1114  * Return: QDF_STATUS
1115  */
1116 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1117 QDF_STATUS
1118 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1119 {
1120 	qdf_dma_addr_t paddr;
1121 
1122 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1123 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1124 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1125 	return QDF_STATUS_SUCCESS;
1126 }
1127 qdf_export_symbol(__qdf_nbuf_map_single);
1128 #else
1129 QDF_STATUS
1130 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1131 {
1132 	qdf_dma_addr_t paddr;
1133 
1134 	/* assume that the OS only provides a single fragment */
1135 	QDF_NBUF_CB_PADDR(buf) = paddr =
1136 		dma_map_single(osdev->dev, buf->data,
1137 				skb_end_pointer(buf) - buf->data,
1138 				__qdf_dma_dir_to_os(dir));
1139 	__qdf_record_nbuf_nbytes(
1140 		__qdf_nbuf_get_end_offset(buf), dir, true);
1141 	return dma_mapping_error(osdev->dev, paddr)
1142 		? QDF_STATUS_E_FAILURE
1143 		: QDF_STATUS_SUCCESS;
1144 }
1145 qdf_export_symbol(__qdf_nbuf_map_single);
1146 #endif
1147 /**
1148  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1149  * @osdev: OS device
1150  * @skb: Pointer to network buffer
1151  * @dir: Direction
1152  *
1153  * Return: none
1154  */
1155 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1156 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1157 				qdf_dma_dir_t dir)
1158 {
1159 }
1160 #else
1161 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1162 					qdf_dma_dir_t dir)
1163 {
1164 	if (QDF_NBUF_CB_PADDR(buf)) {
1165 		__qdf_record_nbuf_nbytes(
1166 			__qdf_nbuf_get_end_offset(buf), dir, false);
1167 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1168 			skb_end_pointer(buf) - buf->data,
1169 			__qdf_dma_dir_to_os(dir));
1170 	}
1171 }
1172 #endif
1173 qdf_export_symbol(__qdf_nbuf_unmap_single);
1174 
1175 /**
1176  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1177  * @skb: Pointer to network buffer
1178  * @cksum: Pointer to checksum value
1179  *
1180  * Return: QDF_STATUS
1181  */
1182 QDF_STATUS
1183 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1184 {
1185 	switch (cksum->l4_result) {
1186 	case QDF_NBUF_RX_CKSUM_NONE:
1187 		skb->ip_summed = CHECKSUM_NONE;
1188 		break;
1189 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1190 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1191 		break;
1192 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1193 		skb->ip_summed = CHECKSUM_PARTIAL;
1194 		skb->csum = cksum->val;
1195 		break;
1196 	default:
1197 		pr_err("Unknown checksum type\n");
1198 		qdf_assert(0);
1199 		return QDF_STATUS_E_NOSUPPORT;
1200 	}
1201 	return QDF_STATUS_SUCCESS;
1202 }
1203 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1204 
1205 /**
1206  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1207  * @skb: Pointer to network buffer
1208  *
1209  * Return: TX checksum value
1210  */
1211 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1212 {
1213 	switch (skb->ip_summed) {
1214 	case CHECKSUM_NONE:
1215 		return QDF_NBUF_TX_CKSUM_NONE;
1216 	case CHECKSUM_PARTIAL:
1217 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1218 	case CHECKSUM_COMPLETE:
1219 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1220 	default:
1221 		return QDF_NBUF_TX_CKSUM_NONE;
1222 	}
1223 }
1224 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1225 
1226 /**
1227  * __qdf_nbuf_get_tid() - get tid
1228  * @skb: Pointer to network buffer
1229  *
1230  * Return: tid
1231  */
1232 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1233 {
1234 	return skb->priority;
1235 }
1236 qdf_export_symbol(__qdf_nbuf_get_tid);
1237 
1238 /**
1239  * __qdf_nbuf_set_tid() - set tid
1240  * @skb: Pointer to network buffer
1241  *
1242  * Return: none
1243  */
1244 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1245 {
1246 	skb->priority = tid;
1247 }
1248 qdf_export_symbol(__qdf_nbuf_set_tid);
1249 
1250 /**
1251  * __qdf_nbuf_set_tid() - set tid
1252  * @skb: Pointer to network buffer
1253  *
1254  * Return: none
1255  */
1256 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1257 {
1258 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1259 }
1260 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1261 
1262 /**
1263  * __qdf_nbuf_reg_trace_cb() - register trace callback
1264  * @cb_func_ptr: Pointer to trace callback function
1265  *
1266  * Return: none
1267  */
1268 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1269 {
1270 	qdf_trace_update_cb = cb_func_ptr;
1271 }
1272 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1273 
1274 /**
1275  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1276  *              of DHCP packet.
1277  * @data: Pointer to DHCP packet data buffer
1278  *
1279  * This func. returns the subtype of DHCP packet.
1280  *
1281  * Return: subtype of the DHCP packet.
1282  */
1283 enum qdf_proto_subtype
1284 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1285 {
1286 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1287 
1288 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1289 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1290 					QDF_DHCP_OPTION53_LENGTH)) {
1291 
1292 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1293 		case QDF_DHCP_DISCOVER:
1294 			subtype = QDF_PROTO_DHCP_DISCOVER;
1295 			break;
1296 		case QDF_DHCP_REQUEST:
1297 			subtype = QDF_PROTO_DHCP_REQUEST;
1298 			break;
1299 		case QDF_DHCP_OFFER:
1300 			subtype = QDF_PROTO_DHCP_OFFER;
1301 			break;
1302 		case QDF_DHCP_ACK:
1303 			subtype = QDF_PROTO_DHCP_ACK;
1304 			break;
1305 		case QDF_DHCP_NAK:
1306 			subtype = QDF_PROTO_DHCP_NACK;
1307 			break;
1308 		case QDF_DHCP_RELEASE:
1309 			subtype = QDF_PROTO_DHCP_RELEASE;
1310 			break;
1311 		case QDF_DHCP_INFORM:
1312 			subtype = QDF_PROTO_DHCP_INFORM;
1313 			break;
1314 		case QDF_DHCP_DECLINE:
1315 			subtype = QDF_PROTO_DHCP_DECLINE;
1316 			break;
1317 		default:
1318 			break;
1319 		}
1320 	}
1321 
1322 	return subtype;
1323 }
1324 
1325 /**
1326  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1327  *            of EAPOL packet.
1328  * @data: Pointer to EAPOL packet data buffer
1329  *
1330  * This func. returns the subtype of EAPOL packet.
1331  *
1332  * Return: subtype of the EAPOL packet.
1333  */
1334 enum qdf_proto_subtype
1335 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1336 {
1337 	uint16_t eapol_key_info;
1338 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1339 	uint16_t mask;
1340 
1341 	eapol_key_info = (uint16_t)(*(uint16_t *)
1342 			(data + EAPOL_KEY_INFO_OFFSET));
1343 
1344 	mask = eapol_key_info & EAPOL_MASK;
1345 	switch (mask) {
1346 	case EAPOL_M1_BIT_MASK:
1347 		subtype = QDF_PROTO_EAPOL_M1;
1348 		break;
1349 	case EAPOL_M2_BIT_MASK:
1350 		subtype = QDF_PROTO_EAPOL_M2;
1351 		break;
1352 	case EAPOL_M3_BIT_MASK:
1353 		subtype = QDF_PROTO_EAPOL_M3;
1354 		break;
1355 	case EAPOL_M4_BIT_MASK:
1356 		subtype = QDF_PROTO_EAPOL_M4;
1357 		break;
1358 	default:
1359 		break;
1360 	}
1361 
1362 	return subtype;
1363 }
1364 
1365 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1366 
1367 /**
1368  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1369  *            of ARP packet.
1370  * @data: Pointer to ARP packet data buffer
1371  *
1372  * This func. returns the subtype of ARP packet.
1373  *
1374  * Return: subtype of the ARP packet.
1375  */
1376 enum qdf_proto_subtype
1377 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1378 {
1379 	uint16_t subtype;
1380 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1381 
1382 	subtype = (uint16_t)(*(uint16_t *)
1383 			(data + ARP_SUB_TYPE_OFFSET));
1384 
1385 	switch (QDF_SWAP_U16(subtype)) {
1386 	case ARP_REQUEST:
1387 		proto_subtype = QDF_PROTO_ARP_REQ;
1388 		break;
1389 	case ARP_RESPONSE:
1390 		proto_subtype = QDF_PROTO_ARP_RES;
1391 		break;
1392 	default:
1393 		break;
1394 	}
1395 
1396 	return proto_subtype;
1397 }
1398 
1399 /**
1400  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1401  *            of IPV4 ICMP packet.
1402  * @data: Pointer to IPV4 ICMP packet data buffer
1403  *
1404  * This func. returns the subtype of ICMP packet.
1405  *
1406  * Return: subtype of the ICMP packet.
1407  */
1408 enum qdf_proto_subtype
1409 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1410 {
1411 	uint8_t subtype;
1412 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1413 
1414 	subtype = (uint8_t)(*(uint8_t *)
1415 			(data + ICMP_SUBTYPE_OFFSET));
1416 
1417 	switch (subtype) {
1418 	case ICMP_REQUEST:
1419 		proto_subtype = QDF_PROTO_ICMP_REQ;
1420 		break;
1421 	case ICMP_RESPONSE:
1422 		proto_subtype = QDF_PROTO_ICMP_RES;
1423 		break;
1424 	default:
1425 		break;
1426 	}
1427 
1428 	return proto_subtype;
1429 }
1430 
1431 /**
1432  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1433  *            of IPV6 ICMPV6 packet.
1434  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1435  *
1436  * This func. returns the subtype of ICMPV6 packet.
1437  *
1438  * Return: subtype of the ICMPV6 packet.
1439  */
1440 enum qdf_proto_subtype
1441 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1442 {
1443 	uint8_t subtype;
1444 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1445 
1446 	subtype = (uint8_t)(*(uint8_t *)
1447 			(data + ICMPV6_SUBTYPE_OFFSET));
1448 
1449 	switch (subtype) {
1450 	case ICMPV6_REQUEST:
1451 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1452 		break;
1453 	case ICMPV6_RESPONSE:
1454 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1455 		break;
1456 	case ICMPV6_RS:
1457 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1458 		break;
1459 	case ICMPV6_RA:
1460 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1461 		break;
1462 	case ICMPV6_NS:
1463 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1464 		break;
1465 	case ICMPV6_NA:
1466 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1467 		break;
1468 	default:
1469 		break;
1470 	}
1471 
1472 	return proto_subtype;
1473 }
1474 
1475 /**
1476  * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
1477  * @skb: Buffer
1478  *
1479  * This function checks IPv4 packet is last fragment or not.
1480  * Caller has to call this function for IPv4 packets only.
1481  *
1482  * Return: True if IPv4 packet is last fragment otherwise false
1483  */
1484 bool
1485 __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1486 {
1487 	if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1488 		return true;
1489 
1490 	return false;
1491 }
1492 
1493 /**
1494  * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for IPv4 packet
1495  * @data: pointer to skb payload
1496  * @tos: value of TOS to be set
1497  *
1498  * This func. set the TOS field of IPv4 packet.
1499  *
1500  * Return: None
1501  */
1502 void
1503 __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
1504 {
1505 	*(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
1506 }
1507 
1508 /**
1509  * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of IPv4 packet
1510  * @data: Pointer to skb payload
1511  *
1512  * This func. returns the TOS type of IPv4 packet.
1513  *
1514  * Return: TOS type of IPv4 packet.
1515  */
1516 uint8_t
1517 __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1518 {
1519 	uint8_t tos;
1520 
1521 	tos = (uint8_t)(*(uint8_t *)(data +
1522 			QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1523 	return tos;
1524 }
1525 
1526 /**
1527  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1528  *            of IPV4 packet.
1529  * @data: Pointer to IPV4 packet data buffer
1530  *
1531  * This func. returns the proto type of IPV4 packet.
1532  *
1533  * Return: proto type of IPV4 packet.
1534  */
1535 uint8_t
1536 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1537 {
1538 	uint8_t proto_type;
1539 
1540 	proto_type = (uint8_t)(*(uint8_t *)(data +
1541 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1542 	return proto_type;
1543 }
1544 
1545 /**
1546  * __qdf_nbuf_data_get_ipv6_tc() - get the TC field
1547  *                                 of IPv6 packet.
1548  * @data: Pointer to IPv6 packet data buffer
1549  *
1550  * This func. returns the TC field of IPv6 packet.
1551  *
1552  * Return: traffic classification of IPv6 packet.
1553  */
1554 uint8_t
1555 __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
1556 {
1557 	struct ipv6hdr *hdr;
1558 
1559 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1560 	return ip6_tclass(ip6_flowinfo(hdr));
1561 }
1562 
1563 /**
1564  * __qdf_nbuf_data_set_ipv6_tc() - set the TC field
1565  *                                 of IPv6 packet.
1566  * @data: Pointer to skb payload
1567  * @tc: value to set to IPv6 header TC field
1568  *
1569  * This func. set the TC field of IPv6 header.
1570  *
1571  * Return: None
1572  */
1573 void
1574 __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
1575 {
1576 	struct ipv6hdr *hdr;
1577 
1578 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1579 	ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
1580 }
1581 
1582 /**
1583  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1584  *            of IPV6 packet.
1585  * @data: Pointer to IPV6 packet data buffer
1586  *
1587  * This func. returns the proto type of IPV6 packet.
1588  *
1589  * Return: proto type of IPV6 packet.
1590  */
1591 uint8_t
1592 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1593 {
1594 	uint8_t proto_type;
1595 
1596 	proto_type = (uint8_t)(*(uint8_t *)(data +
1597 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1598 	return proto_type;
1599 }
1600 
1601 /**
1602  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1603  * @data: Pointer to network data
1604  *
1605  * This api is for Tx packets.
1606  *
1607  * Return: true if packet is ipv4 packet
1608  *	   false otherwise
1609  */
1610 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1611 {
1612 	uint16_t ether_type;
1613 
1614 	ether_type = (uint16_t)(*(uint16_t *)(data +
1615 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1616 
1617 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1618 		return true;
1619 	else
1620 		return false;
1621 }
1622 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1623 
1624 /**
1625  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1626  * @data: Pointer to network data buffer
1627  *
1628  * This api is for ipv4 packet.
1629  *
1630  * Return: true if packet is DHCP packet
1631  *	   false otherwise
1632  */
1633 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1634 {
1635 	uint16_t sport;
1636 	uint16_t dport;
1637 	uint8_t ipv4_offset;
1638 	uint8_t ipv4_hdr_len;
1639 	struct iphdr *iphdr;
1640 
1641 	if (__qdf_nbuf_get_ether_type(data) !=
1642 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1643 		return false;
1644 
1645 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1646 	iphdr = (struct iphdr *)(data + ipv4_offset);
1647 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1648 
1649 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1650 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1651 			      sizeof(uint16_t));
1652 
1653 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1654 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1655 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1656 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1657 		return true;
1658 	else
1659 		return false;
1660 }
1661 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1662 
1663 /**
1664  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1665  * @data: Pointer to network data buffer
1666  *
1667  * This api is for ipv4 packet.
1668  *
1669  * Return: true if packet is EAPOL packet
1670  *	   false otherwise.
1671  */
1672 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1673 {
1674 	uint16_t ether_type;
1675 
1676 	ether_type = __qdf_nbuf_get_ether_type(data);
1677 
1678 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1679 		return true;
1680 	else
1681 		return false;
1682 }
1683 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1684 
1685 /**
1686  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1687  * @skb: Pointer to network buffer
1688  *
1689  * This api is for ipv4 packet.
1690  *
1691  * Return: true if packet is WAPI packet
1692  *	   false otherwise.
1693  */
1694 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1695 {
1696 	uint16_t ether_type;
1697 
1698 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1699 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1700 
1701 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1702 		return true;
1703 	else
1704 		return false;
1705 }
1706 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1707 
1708 /**
1709  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is a igmp packet
1710  * @data: Pointer to network data buffer
1711  *
1712  * This api is for ipv4 packet.
1713  *
1714  * Return: true if packet is igmp packet
1715  *	   false otherwise.
1716  */
1717 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1718 {
1719 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1720 		uint8_t pkt_type;
1721 
1722 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1723 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1724 
1725 		if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
1726 			return true;
1727 	}
1728 	return false;
1729 }
1730 
1731 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
1732 
1733 /**
1734  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is a igmp packet
1735  * @data: Pointer to network data buffer
1736  *
1737  * This api is for ipv6 packet.
1738  *
1739  * Return: true if packet is igmp packet
1740  *	   false otherwise.
1741  */
1742 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
1743 {
1744 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1745 		uint8_t pkt_type;
1746 		uint8_t next_hdr;
1747 
1748 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1749 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1750 		next_hdr = (uint8_t)(*(uint8_t *)(data +
1751 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
1752 
1753 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1754 			return true;
1755 		if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
1756 		    (next_hdr == QDF_NBUF_TRAC_HOPOPTS_TYPE))
1757 			return true;
1758 	}
1759 	return false;
1760 }
1761 
1762 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
1763 
1764 /**
1765  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1766  * @skb: Pointer to network buffer
1767  *
1768  * This api is for ipv4 packet.
1769  *
1770  * Return: true if packet is tdls packet
1771  *	   false otherwise.
1772  */
1773 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1774 {
1775 	uint16_t ether_type;
1776 
1777 	ether_type = *(uint16_t *)(skb->data +
1778 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1779 
1780 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1781 		return true;
1782 	else
1783 		return false;
1784 }
1785 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1786 
1787 /**
1788  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1789  * @data: Pointer to network data buffer
1790  *
1791  * This api is for ipv4 packet.
1792  *
1793  * Return: true if packet is ARP packet
1794  *	   false otherwise.
1795  */
1796 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1797 {
1798 	uint16_t ether_type;
1799 
1800 	ether_type = __qdf_nbuf_get_ether_type(data);
1801 
1802 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1803 		return true;
1804 	else
1805 		return false;
1806 }
1807 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1808 
1809 /**
1810  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1811  * @data: Pointer to network data buffer
1812  *
1813  * This api is for ipv4 packet.
1814  *
1815  * Return: true if packet is ARP request
1816  *	   false otherwise.
1817  */
1818 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1819 {
1820 	uint16_t op_code;
1821 
1822 	op_code = (uint16_t)(*(uint16_t *)(data +
1823 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1824 
1825 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1826 		return true;
1827 	return false;
1828 }
1829 
1830 /**
1831  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1832  * @data: Pointer to network data buffer
1833  *
1834  * This api is for ipv4 packet.
1835  *
1836  * Return: true if packet is ARP response
1837  *	   false otherwise.
1838  */
1839 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1840 {
1841 	uint16_t op_code;
1842 
1843 	op_code = (uint16_t)(*(uint16_t *)(data +
1844 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1845 
1846 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1847 		return true;
1848 	return false;
1849 }
1850 
1851 /**
1852  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1853  * @data: Pointer to network data buffer
1854  *
1855  * This api is for ipv4 packet.
1856  *
1857  * Return: ARP packet source IP value.
1858  */
1859 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1860 {
1861 	uint32_t src_ip;
1862 
1863 	src_ip = (uint32_t)(*(uint32_t *)(data +
1864 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1865 
1866 	return src_ip;
1867 }
1868 
1869 /**
1870  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1871  * @data: Pointer to network data buffer
1872  *
1873  * This api is for ipv4 packet.
1874  *
1875  * Return: ARP packet target IP value.
1876  */
1877 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1878 {
1879 	uint32_t tgt_ip;
1880 
1881 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1882 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1883 
1884 	return tgt_ip;
1885 }
1886 
1887 /**
1888  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1889  * @data: Pointer to network data buffer
1890  * @len: length to copy
1891  *
1892  * This api is for dns domain name
1893  *
1894  * Return: dns domain name.
1895  */
1896 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1897 {
1898 	uint8_t *domain_name;
1899 
1900 	domain_name = (uint8_t *)
1901 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1902 	return domain_name;
1903 }
1904 
1905 
1906 /**
1907  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1908  * @data: Pointer to network data buffer
1909  *
1910  * This api is for dns query packet.
1911  *
1912  * Return: true if packet is dns query packet.
1913  *	   false otherwise.
1914  */
1915 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1916 {
1917 	uint16_t op_code;
1918 	uint16_t tgt_port;
1919 
1920 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1921 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1922 	/* Standard DNS query always happen on Dest Port 53. */
1923 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1924 		op_code = (uint16_t)(*(uint16_t *)(data +
1925 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1926 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1927 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1928 			return true;
1929 	}
1930 	return false;
1931 }
1932 
1933 /**
1934  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1935  * @data: Pointer to network data buffer
1936  *
1937  * This api is for dns query response.
1938  *
1939  * Return: true if packet is dns response packet.
1940  *	   false otherwise.
1941  */
1942 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1943 {
1944 	uint16_t op_code;
1945 	uint16_t src_port;
1946 
1947 	src_port = (uint16_t)(*(uint16_t *)(data +
1948 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1949 	/* Standard DNS response always comes on Src Port 53. */
1950 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1951 		op_code = (uint16_t)(*(uint16_t *)(data +
1952 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1953 
1954 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1955 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1956 			return true;
1957 	}
1958 	return false;
1959 }
1960 
1961 /**
1962  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
1963  * @data: Pointer to network data buffer
1964  *
1965  * This api is to check if the packet is tcp fin.
1966  *
1967  * Return: true if packet is tcp fin packet.
1968  *         false otherwise.
1969  */
1970 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
1971 {
1972 	uint8_t op_code;
1973 
1974 	op_code = (uint8_t)(*(uint8_t *)(data +
1975 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1976 
1977 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
1978 		return true;
1979 
1980 	return false;
1981 }
1982 
1983 /**
1984  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
1985  * @data: Pointer to network data buffer
1986  *
1987  * This api is to check if the tcp packet is fin ack.
1988  *
1989  * Return: true if packet is tcp fin ack packet.
1990  *         false otherwise.
1991  */
1992 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
1993 {
1994 	uint8_t op_code;
1995 
1996 	op_code = (uint8_t)(*(uint8_t *)(data +
1997 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1998 
1999 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2000 		return true;
2001 
2002 	return false;
2003 }
2004 
2005 /**
2006  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
2007  * @data: Pointer to network data buffer
2008  *
2009  * This api is for tcp syn packet.
2010  *
2011  * Return: true if packet is tcp syn packet.
2012  *	   false otherwise.
2013  */
2014 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2015 {
2016 	uint8_t op_code;
2017 
2018 	op_code = (uint8_t)(*(uint8_t *)(data +
2019 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2020 
2021 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2022 		return true;
2023 	return false;
2024 }
2025 
2026 /**
2027  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
2028  * @data: Pointer to network data buffer
2029  *
2030  * This api is for tcp syn ack packet.
2031  *
2032  * Return: true if packet is tcp syn ack packet.
2033  *	   false otherwise.
2034  */
2035 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2036 {
2037 	uint8_t op_code;
2038 
2039 	op_code = (uint8_t)(*(uint8_t *)(data +
2040 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2041 
2042 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2043 		return true;
2044 	return false;
2045 }
2046 
2047 /**
2048  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
2049  * @data: Pointer to network data buffer
2050  *
2051  * This api is to check if the tcp packet is rst.
2052  *
2053  * Return: true if packet is tcp rst packet.
2054  *         false otherwise.
2055  */
2056 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2057 {
2058 	uint8_t op_code;
2059 
2060 	op_code = (uint8_t)(*(uint8_t *)(data +
2061 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2062 
2063 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2064 		return true;
2065 
2066 	return false;
2067 }
2068 
2069 /**
2070  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
2071  * @data: Pointer to network data buffer
2072  *
2073  * This api is for tcp ack packet.
2074  *
2075  * Return: true if packet is tcp ack packet.
2076  *	   false otherwise.
2077  */
2078 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2079 {
2080 	uint8_t op_code;
2081 
2082 	op_code = (uint8_t)(*(uint8_t *)(data +
2083 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2084 
2085 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2086 		return true;
2087 	return false;
2088 }
2089 
2090 /**
2091  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
2092  * @data: Pointer to network data buffer
2093  *
2094  * This api is for tcp packet.
2095  *
2096  * Return: tcp source port value.
2097  */
2098 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2099 {
2100 	uint16_t src_port;
2101 
2102 	src_port = (uint16_t)(*(uint16_t *)(data +
2103 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2104 
2105 	return src_port;
2106 }
2107 
2108 /**
2109  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
2110  * @data: Pointer to network data buffer
2111  *
2112  * This api is for tcp packet.
2113  *
2114  * Return: tcp destination port value.
2115  */
2116 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2117 {
2118 	uint16_t tgt_port;
2119 
2120 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2121 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2122 
2123 	return tgt_port;
2124 }
2125 
2126 /**
2127  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
2128  * @data: Pointer to network data buffer
2129  *
2130  * This api is for ipv4 req packet.
2131  *
2132  * Return: true if packet is icmpv4 request
2133  *	   false otherwise.
2134  */
2135 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2136 {
2137 	uint8_t op_code;
2138 
2139 	op_code = (uint8_t)(*(uint8_t *)(data +
2140 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2141 
2142 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2143 		return true;
2144 	return false;
2145 }
2146 
2147 /**
2148  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
2149  * @data: Pointer to network data buffer
2150  *
2151  * This api is for ipv4 res packet.
2152  *
2153  * Return: true if packet is icmpv4 response
2154  *	   false otherwise.
2155  */
2156 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2157 {
2158 	uint8_t op_code;
2159 
2160 	op_code = (uint8_t)(*(uint8_t *)(data +
2161 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2162 
2163 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2164 		return true;
2165 	return false;
2166 }
2167 
2168 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2169 {
2170 	uint8_t op_code;
2171 
2172 	op_code = (uint8_t)(*(uint8_t *)(data +
2173 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2174 
2175 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2176 		return true;
2177 	return false;
2178 }
2179 
2180 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2181 
2182 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2183 {
2184 	uint8_t subtype;
2185 
2186 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2187 
2188 	if (subtype == ICMPV6_REDIRECT)
2189 		return true;
2190 	return false;
2191 }
2192 
2193 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2194 
2195 /**
2196  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
2197  * @data: Pointer to network data buffer
2198  *
2199  * This api is for ipv4 packet.
2200  *
2201  * Return: icmpv4 packet source IP value.
2202  */
2203 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2204 {
2205 	uint32_t src_ip;
2206 
2207 	src_ip = (uint32_t)(*(uint32_t *)(data +
2208 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2209 
2210 	return src_ip;
2211 }
2212 
2213 /**
2214  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
2215  * @data: Pointer to network data buffer
2216  *
2217  * This api is for ipv4 packet.
2218  *
2219  * Return: icmpv4 packet target IP value.
2220  */
2221 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2222 {
2223 	uint32_t tgt_ip;
2224 
2225 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2226 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2227 
2228 	return tgt_ip;
2229 }
2230 
2231 
2232 /**
2233  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
2234  * @data: Pointer to IPV6 packet data buffer
2235  *
2236  * This func. checks whether it is a IPV6 packet or not.
2237  *
2238  * Return: TRUE if it is a IPV6 packet
2239  *         FALSE if not
2240  */
2241 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2242 {
2243 	uint16_t ether_type;
2244 
2245 	ether_type = (uint16_t)(*(uint16_t *)(data +
2246 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2247 
2248 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2249 		return true;
2250 	else
2251 		return false;
2252 }
2253 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2254 
2255 /**
2256  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2257  * @data: Pointer to network data buffer
2258  *
2259  * This api is for ipv6 packet.
2260  *
2261  * Return: true if packet is DHCP packet
2262  *	   false otherwise
2263  */
2264 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2265 {
2266 	uint16_t sport;
2267 	uint16_t dport;
2268 	uint8_t ipv6_offset;
2269 
2270 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2271 		return false;
2272 
2273 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2274 	sport = *(uint16_t *)(data + ipv6_offset +
2275 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2276 	dport = *(uint16_t *)(data + ipv6_offset +
2277 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2278 			      sizeof(uint16_t));
2279 
2280 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2281 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2282 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2283 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2284 		return true;
2285 	else
2286 		return false;
2287 }
2288 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2289 
2290 /**
2291  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2292  * @data: Pointer to network data buffer
2293  *
2294  * This api is for ipv6 packet.
2295  *
2296  * Return: true if packet is MDNS packet
2297  *	   false otherwise
2298  */
2299 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2300 {
2301 	uint16_t sport;
2302 	uint16_t dport;
2303 
2304 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2305 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2306 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2307 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2308 					sizeof(uint16_t));
2309 
2310 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2311 	    dport == sport)
2312 		return true;
2313 	else
2314 		return false;
2315 }
2316 
2317 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2318 
2319 /**
2320  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2321  * @data: Pointer to IPV4 packet data buffer
2322  *
2323  * This func. checks whether it is a IPV4 multicast packet or not.
2324  *
2325  * Return: TRUE if it is a IPV4 multicast packet
2326  *         FALSE if not
2327  */
2328 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2329 {
2330 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2331 		uint32_t *dst_addr =
2332 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2333 
2334 		/*
2335 		 * Check first word of the IPV4 address and if it is
2336 		 * equal to 0xE then it represents multicast IP.
2337 		 */
2338 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2339 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2340 			return true;
2341 		else
2342 			return false;
2343 	} else
2344 		return false;
2345 }
2346 
2347 /**
2348  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2349  * @data: Pointer to IPV6 packet data buffer
2350  *
2351  * This func. checks whether it is a IPV6 multicast packet or not.
2352  *
2353  * Return: TRUE if it is a IPV6 multicast packet
2354  *         FALSE if not
2355  */
2356 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2357 {
2358 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2359 		uint16_t *dst_addr;
2360 
2361 		dst_addr = (uint16_t *)
2362 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2363 
2364 		/*
2365 		 * Check first byte of the IP address and if it
2366 		 * 0xFF00 then it is a IPV6 mcast packet.
2367 		 */
2368 		if (*dst_addr ==
2369 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2370 			return true;
2371 		else
2372 			return false;
2373 	} else
2374 		return false;
2375 }
2376 
2377 /**
2378  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2379  * @data: Pointer to IPV4 ICMP packet data buffer
2380  *
2381  * This func. checks whether it is a ICMP packet or not.
2382  *
2383  * Return: TRUE if it is a ICMP packet
2384  *         FALSE if not
2385  */
2386 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2387 {
2388 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2389 		uint8_t pkt_type;
2390 
2391 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2392 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2393 
2394 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2395 			return true;
2396 		else
2397 			return false;
2398 	} else
2399 		return false;
2400 }
2401 
2402 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2403 
2404 /**
2405  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2406  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2407  *
2408  * This func. checks whether it is a ICMPV6 packet or not.
2409  *
2410  * Return: TRUE if it is a ICMPV6 packet
2411  *         FALSE if not
2412  */
2413 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2414 {
2415 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2416 		uint8_t pkt_type;
2417 
2418 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2419 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2420 
2421 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2422 			return true;
2423 		else
2424 			return false;
2425 	} else
2426 		return false;
2427 }
2428 
2429 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2430 
2431 /**
2432  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2433  * @data: Pointer to IPV4 UDP packet data buffer
2434  *
2435  * This func. checks whether it is a IPV4 UDP packet or not.
2436  *
2437  * Return: TRUE if it is a IPV4 UDP packet
2438  *         FALSE if not
2439  */
2440 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2441 {
2442 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2443 		uint8_t pkt_type;
2444 
2445 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2446 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2447 
2448 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2449 			return true;
2450 		else
2451 			return false;
2452 	} else
2453 		return false;
2454 }
2455 
2456 /**
2457  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2458  * @data: Pointer to IPV4 TCP packet data buffer
2459  *
2460  * This func. checks whether it is a IPV4 TCP packet or not.
2461  *
2462  * Return: TRUE if it is a IPV4 TCP packet
2463  *         FALSE if not
2464  */
2465 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2466 {
2467 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2468 		uint8_t pkt_type;
2469 
2470 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2471 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2472 
2473 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2474 			return true;
2475 		else
2476 			return false;
2477 	} else
2478 		return false;
2479 }
2480 
2481 /**
2482  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2483  * @data: Pointer to IPV6 UDP packet data buffer
2484  *
2485  * This func. checks whether it is a IPV6 UDP packet or not.
2486  *
2487  * Return: TRUE if it is a IPV6 UDP packet
2488  *         FALSE if not
2489  */
2490 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2491 {
2492 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2493 		uint8_t pkt_type;
2494 
2495 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2496 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2497 
2498 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2499 			return true;
2500 		else
2501 			return false;
2502 	} else
2503 		return false;
2504 }
2505 
2506 /**
2507  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2508  * @data: Pointer to IPV6 TCP packet data buffer
2509  *
2510  * This func. checks whether it is a IPV6 TCP packet or not.
2511  *
2512  * Return: TRUE if it is a IPV6 TCP packet
2513  *         FALSE if not
2514  */
2515 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2516 {
2517 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2518 		uint8_t pkt_type;
2519 
2520 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2521 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2522 
2523 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2524 			return true;
2525 		else
2526 			return false;
2527 	} else
2528 		return false;
2529 }
2530 
2531 /**
2532  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2533  * @nbuf - sk buff
2534  *
2535  * Return: true if packet is broadcast
2536  *	   false otherwise
2537  */
2538 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2539 {
2540 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2541 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2542 }
2543 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2544 
2545 /**
2546  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
2547  * @nbuf - sk buff
2548  *
2549  * Return: true if packet is multicast replay
2550  *	   false otherwise
2551  */
2552 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2553 {
2554 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2555 
2556 	if (unlikely(nbuf->pkt_type == PACKET_MULTICAST)) {
2557 		if (unlikely(ether_addr_equal(eh->h_source,
2558 					      nbuf->dev->dev_addr)))
2559 			return true;
2560 	}
2561 	return false;
2562 }
2563 
2564 /**
2565  * __qdf_nbuf_is_arp_local() - check if local or non local arp
2566  * @skb: pointer to sk_buff
2567  *
2568  * Return: true if local arp or false otherwise.
2569  */
2570 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
2571 {
2572 	struct arphdr *arp;
2573 	struct in_ifaddr **ifap = NULL;
2574 	struct in_ifaddr *ifa = NULL;
2575 	struct in_device *in_dev;
2576 	unsigned char *arp_ptr;
2577 	__be32 tip;
2578 
2579 	arp = (struct arphdr *)skb->data;
2580 	if (arp->ar_op == htons(ARPOP_REQUEST)) {
2581 		/* if fail to acquire rtnl lock, assume it's local arp */
2582 		if (!rtnl_trylock())
2583 			return true;
2584 
2585 		in_dev = __in_dev_get_rtnl(skb->dev);
2586 		if (in_dev) {
2587 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
2588 				ifap = &ifa->ifa_next) {
2589 				if (!strcmp(skb->dev->name, ifa->ifa_label))
2590 					break;
2591 			}
2592 		}
2593 
2594 		if (ifa && ifa->ifa_local) {
2595 			arp_ptr = (unsigned char *)(arp + 1);
2596 			arp_ptr += (skb->dev->addr_len + 4 +
2597 					skb->dev->addr_len);
2598 			memcpy(&tip, arp_ptr, 4);
2599 			qdf_debug("ARP packet: local IP: %x dest IP: %x",
2600 				  ifa->ifa_local, tip);
2601 			if (ifa->ifa_local == tip) {
2602 				rtnl_unlock();
2603 				return true;
2604 			}
2605 		}
2606 		rtnl_unlock();
2607 	}
2608 
2609 	return false;
2610 }
2611 
2612 #ifdef NBUF_MEMORY_DEBUG
2613 
2614 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2615 
2616 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2617 static struct kmem_cache *nbuf_tracking_cache;
2618 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2619 static spinlock_t qdf_net_buf_track_free_list_lock;
2620 static uint32_t qdf_net_buf_track_free_list_count;
2621 static uint32_t qdf_net_buf_track_used_list_count;
2622 static uint32_t qdf_net_buf_track_max_used;
2623 static uint32_t qdf_net_buf_track_max_free;
2624 static uint32_t qdf_net_buf_track_max_allocated;
2625 static uint32_t qdf_net_buf_track_fail_count;
2626 
2627 /**
2628  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2629  *
2630  * tracks the max number of network buffers that the wlan driver was tracking
2631  * at any one time.
2632  *
2633  * Return: none
2634  */
2635 static inline void update_max_used(void)
2636 {
2637 	int sum;
2638 
2639 	if (qdf_net_buf_track_max_used <
2640 	    qdf_net_buf_track_used_list_count)
2641 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2642 	sum = qdf_net_buf_track_free_list_count +
2643 		qdf_net_buf_track_used_list_count;
2644 	if (qdf_net_buf_track_max_allocated < sum)
2645 		qdf_net_buf_track_max_allocated = sum;
2646 }
2647 
2648 /**
2649  * update_max_free() - update qdf_net_buf_track_free_list_count
2650  *
2651  * tracks the max number tracking buffers kept in the freelist.
2652  *
2653  * Return: none
2654  */
2655 static inline void update_max_free(void)
2656 {
2657 	if (qdf_net_buf_track_max_free <
2658 	    qdf_net_buf_track_free_list_count)
2659 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2660 }
2661 
2662 /**
2663  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2664  *
2665  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2666  * This function also ads fexibility to adjust the allocation and freelist
2667  * scheems.
2668  *
2669  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2670  */
2671 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2672 {
2673 	int flags = GFP_KERNEL;
2674 	unsigned long irq_flag;
2675 	QDF_NBUF_TRACK *new_node = NULL;
2676 
2677 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2678 	qdf_net_buf_track_used_list_count++;
2679 	if (qdf_net_buf_track_free_list) {
2680 		new_node = qdf_net_buf_track_free_list;
2681 		qdf_net_buf_track_free_list =
2682 			qdf_net_buf_track_free_list->p_next;
2683 		qdf_net_buf_track_free_list_count--;
2684 	}
2685 	update_max_used();
2686 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2687 
2688 	if (new_node)
2689 		return new_node;
2690 
2691 	if (in_interrupt() || irqs_disabled() || in_atomic())
2692 		flags = GFP_ATOMIC;
2693 
2694 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2695 }
2696 
2697 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2698 #define FREEQ_POOLSIZE 2048
2699 
2700 /**
2701  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2702  *
2703  * Matches calls to qdf_nbuf_track_alloc.
2704  * Either frees the tracking cookie to kernel or an internal
2705  * freelist based on the size of the freelist.
2706  *
2707  * Return: none
2708  */
2709 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2710 {
2711 	unsigned long irq_flag;
2712 
2713 	if (!node)
2714 		return;
2715 
2716 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2717 	 * only shrink the freelist if it is bigger than twice the number of
2718 	 * nbufs in use. If the driver is stalling in a consistent bursty
2719 	 * fasion, this will keep 3/4 of thee allocations from the free list
2720 	 * while also allowing the system to recover memory as less frantic
2721 	 * traffic occurs.
2722 	 */
2723 
2724 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2725 
2726 	qdf_net_buf_track_used_list_count--;
2727 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2728 	   (qdf_net_buf_track_free_list_count >
2729 	    qdf_net_buf_track_used_list_count << 1)) {
2730 		kmem_cache_free(nbuf_tracking_cache, node);
2731 	} else {
2732 		node->p_next = qdf_net_buf_track_free_list;
2733 		qdf_net_buf_track_free_list = node;
2734 		qdf_net_buf_track_free_list_count++;
2735 	}
2736 	update_max_free();
2737 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2738 }
2739 
2740 /**
2741  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2742  *
2743  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2744  * the freelist first makes it performant for the first iperf udp burst
2745  * as well as steady state.
2746  *
2747  * Return: None
2748  */
2749 static void qdf_nbuf_track_prefill(void)
2750 {
2751 	int i;
2752 	QDF_NBUF_TRACK *node, *head;
2753 
2754 	/* prepopulate the freelist */
2755 	head = NULL;
2756 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2757 		node = qdf_nbuf_track_alloc();
2758 		if (!node)
2759 			continue;
2760 		node->p_next = head;
2761 		head = node;
2762 	}
2763 	while (head) {
2764 		node = head->p_next;
2765 		qdf_nbuf_track_free(head);
2766 		head = node;
2767 	}
2768 
2769 	/* prefilled buffers should not count as used */
2770 	qdf_net_buf_track_max_used = 0;
2771 }
2772 
2773 /**
2774  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2775  *
2776  * This initializes the memory manager for the nbuf tracking cookies.  Because
2777  * these cookies are all the same size and only used in this feature, we can
2778  * use a kmem_cache to provide tracking as well as to speed up allocations.
2779  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2780  * features) a freelist is prepopulated here.
2781  *
2782  * Return: None
2783  */
2784 static void qdf_nbuf_track_memory_manager_create(void)
2785 {
2786 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2787 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2788 						sizeof(QDF_NBUF_TRACK),
2789 						0, 0, NULL);
2790 
2791 	qdf_nbuf_track_prefill();
2792 }
2793 
2794 /**
2795  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2796  *
2797  * Empty the freelist and print out usage statistics when it is no longer
2798  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2799  * any nbuf tracking cookies were leaked.
2800  *
2801  * Return: None
2802  */
2803 static void qdf_nbuf_track_memory_manager_destroy(void)
2804 {
2805 	QDF_NBUF_TRACK *node, *tmp;
2806 	unsigned long irq_flag;
2807 
2808 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2809 	node = qdf_net_buf_track_free_list;
2810 
2811 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2812 		qdf_print("%s: unexpectedly large max_used count %d",
2813 			  __func__, qdf_net_buf_track_max_used);
2814 
2815 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2816 		qdf_print("%s: %d unused trackers were allocated",
2817 			  __func__,
2818 			  qdf_net_buf_track_max_allocated -
2819 			  qdf_net_buf_track_max_used);
2820 
2821 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2822 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2823 		qdf_print("%s: check freelist shrinking functionality",
2824 			  __func__);
2825 
2826 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2827 		  "%s: %d residual freelist size",
2828 		  __func__, qdf_net_buf_track_free_list_count);
2829 
2830 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2831 		  "%s: %d max freelist size observed",
2832 		  __func__, qdf_net_buf_track_max_free);
2833 
2834 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2835 		  "%s: %d max buffers used observed",
2836 		  __func__, qdf_net_buf_track_max_used);
2837 
2838 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2839 		  "%s: %d max buffers allocated observed",
2840 		  __func__, qdf_net_buf_track_max_allocated);
2841 
2842 	while (node) {
2843 		tmp = node;
2844 		node = node->p_next;
2845 		kmem_cache_free(nbuf_tracking_cache, tmp);
2846 		qdf_net_buf_track_free_list_count--;
2847 	}
2848 
2849 	if (qdf_net_buf_track_free_list_count != 0)
2850 		qdf_info("%d unfreed tracking memory lost in freelist",
2851 			 qdf_net_buf_track_free_list_count);
2852 
2853 	if (qdf_net_buf_track_used_list_count != 0)
2854 		qdf_info("%d unfreed tracking memory still in use",
2855 			 qdf_net_buf_track_used_list_count);
2856 
2857 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2858 	kmem_cache_destroy(nbuf_tracking_cache);
2859 	qdf_net_buf_track_free_list = NULL;
2860 }
2861 
2862 /**
2863  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2864  *
2865  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2866  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2867  * WLAN driver module whose allocated SKB is freed by network stack are
2868  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2869  * reported as memory leak.
2870  *
2871  * Return: none
2872  */
2873 void qdf_net_buf_debug_init(void)
2874 {
2875 	uint32_t i;
2876 
2877 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2878 
2879 	if (is_initial_mem_debug_disabled)
2880 		return;
2881 
2882 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2883 
2884 	qdf_nbuf_map_tracking_init();
2885 	qdf_nbuf_track_memory_manager_create();
2886 
2887 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2888 		gp_qdf_net_buf_track_tbl[i] = NULL;
2889 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2890 	}
2891 }
2892 qdf_export_symbol(qdf_net_buf_debug_init);
2893 
2894 /**
2895  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2896  *
2897  * Exit network buffer tracking debug functionality and log SKB memory leaks
2898  * As part of exiting the functionality, free the leaked memory and
2899  * cleanup the tracking buffers.
2900  *
2901  * Return: none
2902  */
2903 void qdf_net_buf_debug_exit(void)
2904 {
2905 	uint32_t i;
2906 	uint32_t count = 0;
2907 	unsigned long irq_flag;
2908 	QDF_NBUF_TRACK *p_node;
2909 	QDF_NBUF_TRACK *p_prev;
2910 
2911 	if (is_initial_mem_debug_disabled)
2912 		return;
2913 
2914 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2915 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2916 		p_node = gp_qdf_net_buf_track_tbl[i];
2917 		while (p_node) {
2918 			p_prev = p_node;
2919 			p_node = p_node->p_next;
2920 			count++;
2921 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2922 				 p_prev->func_name, p_prev->line_num,
2923 				 p_prev->size, p_prev->net_buf);
2924 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2925 				 p_prev->map_func_name,
2926 				 p_prev->map_line_num,
2927 				 p_prev->unmap_func_name,
2928 				 p_prev->unmap_line_num,
2929 				 p_prev->is_nbuf_mapped);
2930 			qdf_nbuf_track_free(p_prev);
2931 		}
2932 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2933 	}
2934 
2935 	qdf_nbuf_track_memory_manager_destroy();
2936 	qdf_nbuf_map_tracking_deinit();
2937 
2938 #ifdef CONFIG_HALT_KMEMLEAK
2939 	if (count) {
2940 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2941 		QDF_BUG(0);
2942 	}
2943 #endif
2944 }
2945 qdf_export_symbol(qdf_net_buf_debug_exit);
2946 
2947 /**
2948  * qdf_net_buf_debug_hash() - hash network buffer pointer
2949  *
2950  * Return: hash value
2951  */
2952 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2953 {
2954 	uint32_t i;
2955 
2956 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2957 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2958 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2959 
2960 	return i;
2961 }
2962 
2963 /**
2964  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2965  *
2966  * Return: If skb is found in hash table then return pointer to network buffer
2967  *	else return %NULL
2968  */
2969 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2970 {
2971 	uint32_t i;
2972 	QDF_NBUF_TRACK *p_node;
2973 
2974 	i = qdf_net_buf_debug_hash(net_buf);
2975 	p_node = gp_qdf_net_buf_track_tbl[i];
2976 
2977 	while (p_node) {
2978 		if (p_node->net_buf == net_buf)
2979 			return p_node;
2980 		p_node = p_node->p_next;
2981 	}
2982 
2983 	return NULL;
2984 }
2985 
2986 /**
2987  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2988  *
2989  * Return: none
2990  */
2991 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2992 				const char *func_name, uint32_t line_num)
2993 {
2994 	uint32_t i;
2995 	unsigned long irq_flag;
2996 	QDF_NBUF_TRACK *p_node;
2997 	QDF_NBUF_TRACK *new_node;
2998 
2999 	if (is_initial_mem_debug_disabled)
3000 		return;
3001 
3002 	new_node = qdf_nbuf_track_alloc();
3003 
3004 	i = qdf_net_buf_debug_hash(net_buf);
3005 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3006 
3007 	p_node = qdf_net_buf_debug_look_up(net_buf);
3008 
3009 	if (p_node) {
3010 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
3011 			  p_node->net_buf, p_node->func_name, p_node->line_num,
3012 			  net_buf, func_name, line_num);
3013 		qdf_nbuf_track_free(new_node);
3014 	} else {
3015 		p_node = new_node;
3016 		if (p_node) {
3017 			p_node->net_buf = net_buf;
3018 			qdf_str_lcopy(p_node->func_name, func_name,
3019 				      QDF_MEM_FUNC_NAME_SIZE);
3020 			p_node->line_num = line_num;
3021 			p_node->is_nbuf_mapped = false;
3022 			p_node->map_line_num = 0;
3023 			p_node->unmap_line_num = 0;
3024 			p_node->map_func_name[0] = '\0';
3025 			p_node->unmap_func_name[0] = '\0';
3026 			p_node->size = size;
3027 			p_node->time = qdf_get_log_timestamp();
3028 			qdf_mem_skb_inc(size);
3029 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
3030 			gp_qdf_net_buf_track_tbl[i] = p_node;
3031 		} else {
3032 			qdf_net_buf_track_fail_count++;
3033 			qdf_print(
3034 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
3035 				  func_name, line_num, size);
3036 		}
3037 	}
3038 
3039 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3040 }
3041 qdf_export_symbol(qdf_net_buf_debug_add_node);
3042 
3043 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
3044 				   uint32_t line_num)
3045 {
3046 	uint32_t i;
3047 	unsigned long irq_flag;
3048 	QDF_NBUF_TRACK *p_node;
3049 
3050 	if (is_initial_mem_debug_disabled)
3051 		return;
3052 
3053 	i = qdf_net_buf_debug_hash(net_buf);
3054 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3055 
3056 	p_node = qdf_net_buf_debug_look_up(net_buf);
3057 
3058 	if (p_node) {
3059 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
3060 			      QDF_MEM_FUNC_NAME_SIZE);
3061 		p_node->line_num = line_num;
3062 	}
3063 
3064 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3065 }
3066 
3067 qdf_export_symbol(qdf_net_buf_debug_update_node);
3068 
3069 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
3070 				       const char *func_name,
3071 				       uint32_t line_num)
3072 {
3073 	uint32_t i;
3074 	unsigned long irq_flag;
3075 	QDF_NBUF_TRACK *p_node;
3076 
3077 	if (is_initial_mem_debug_disabled)
3078 		return;
3079 
3080 	i = qdf_net_buf_debug_hash(net_buf);
3081 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3082 
3083 	p_node = qdf_net_buf_debug_look_up(net_buf);
3084 
3085 	if (p_node) {
3086 		qdf_str_lcopy(p_node->map_func_name, func_name,
3087 			      QDF_MEM_FUNC_NAME_SIZE);
3088 		p_node->map_line_num = line_num;
3089 		p_node->is_nbuf_mapped = true;
3090 	}
3091 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3092 }
3093 
3094 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3095 					 const char *func_name,
3096 					 uint32_t line_num)
3097 {
3098 	uint32_t i;
3099 	unsigned long irq_flag;
3100 	QDF_NBUF_TRACK *p_node;
3101 
3102 	if (is_initial_mem_debug_disabled)
3103 		return;
3104 
3105 	i = qdf_net_buf_debug_hash(net_buf);
3106 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3107 
3108 	p_node = qdf_net_buf_debug_look_up(net_buf);
3109 
3110 	if (p_node) {
3111 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
3112 			      QDF_MEM_FUNC_NAME_SIZE);
3113 		p_node->unmap_line_num = line_num;
3114 		p_node->is_nbuf_mapped = false;
3115 	}
3116 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3117 }
3118 
3119 /**
3120  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
3121  *
3122  * Return: none
3123  */
3124 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3125 {
3126 	uint32_t i;
3127 	QDF_NBUF_TRACK *p_head;
3128 	QDF_NBUF_TRACK *p_node = NULL;
3129 	unsigned long irq_flag;
3130 	QDF_NBUF_TRACK *p_prev;
3131 
3132 	if (is_initial_mem_debug_disabled)
3133 		return;
3134 
3135 	i = qdf_net_buf_debug_hash(net_buf);
3136 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3137 
3138 	p_head = gp_qdf_net_buf_track_tbl[i];
3139 
3140 	/* Unallocated SKB */
3141 	if (!p_head)
3142 		goto done;
3143 
3144 	p_node = p_head;
3145 	/* Found at head of the table */
3146 	if (p_head->net_buf == net_buf) {
3147 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3148 		goto done;
3149 	}
3150 
3151 	/* Search in collision list */
3152 	while (p_node) {
3153 		p_prev = p_node;
3154 		p_node = p_node->p_next;
3155 		if ((p_node) && (p_node->net_buf == net_buf)) {
3156 			p_prev->p_next = p_node->p_next;
3157 			break;
3158 		}
3159 	}
3160 
3161 done:
3162 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3163 
3164 	if (p_node) {
3165 		qdf_mem_skb_dec(p_node->size);
3166 		qdf_nbuf_track_free(p_node);
3167 	} else {
3168 		if (qdf_net_buf_track_fail_count) {
3169 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3170 				  net_buf, qdf_net_buf_track_fail_count);
3171 		} else
3172 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3173 					   net_buf);
3174 	}
3175 }
3176 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3177 
3178 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3179 				   const char *func_name, uint32_t line_num)
3180 {
3181 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3182 
3183 	if (is_initial_mem_debug_disabled)
3184 		return;
3185 
3186 	while (ext_list) {
3187 		/*
3188 		 * Take care to add if it is Jumbo packet connected using
3189 		 * frag_list
3190 		 */
3191 		qdf_nbuf_t next;
3192 
3193 		next = qdf_nbuf_queue_next(ext_list);
3194 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3195 		ext_list = next;
3196 	}
3197 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3198 }
3199 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3200 
3201 /**
3202  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
3203  * @net_buf: Network buf holding head segment (single)
3204  *
3205  * WLAN driver module whose allocated SKB is freed by network stack are
3206  * suppose to call this API before returning SKB to network stack such
3207  * that the SKB is not reported as memory leak.
3208  *
3209  * Return: none
3210  */
3211 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3212 {
3213 	qdf_nbuf_t ext_list;
3214 
3215 	if (is_initial_mem_debug_disabled)
3216 		return;
3217 
3218 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3219 	while (ext_list) {
3220 		/*
3221 		 * Take care to free if it is Jumbo packet connected using
3222 		 * frag_list
3223 		 */
3224 		qdf_nbuf_t next;
3225 
3226 		next = qdf_nbuf_queue_next(ext_list);
3227 
3228 		if (qdf_nbuf_get_users(ext_list) > 1) {
3229 			ext_list = next;
3230 			continue;
3231 		}
3232 
3233 		qdf_net_buf_debug_delete_node(ext_list);
3234 		ext_list = next;
3235 	}
3236 
3237 	if (qdf_nbuf_get_users(net_buf) > 1)
3238 		return;
3239 
3240 	qdf_net_buf_debug_delete_node(net_buf);
3241 }
3242 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3243 
3244 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3245 				int reserve, int align, int prio,
3246 				const char *func, uint32_t line)
3247 {
3248 	qdf_nbuf_t nbuf;
3249 
3250 	if (is_initial_mem_debug_disabled)
3251 		return __qdf_nbuf_alloc(osdev, size,
3252 					reserve, align,
3253 					prio, func, line);
3254 
3255 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3256 
3257 	/* Store SKB in internal QDF tracking table */
3258 	if (qdf_likely(nbuf)) {
3259 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3260 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3261 	} else {
3262 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3263 	}
3264 
3265 	return nbuf;
3266 }
3267 qdf_export_symbol(qdf_nbuf_alloc_debug);
3268 
3269 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3270 					    const char *func, uint32_t line)
3271 {
3272 	qdf_nbuf_t nbuf;
3273 
3274 	if (is_initial_mem_debug_disabled)
3275 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3276 						    line);
3277 
3278 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3279 
3280 	/* Store SKB in internal QDF tracking table */
3281 	if (qdf_likely(nbuf)) {
3282 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3283 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3284 	} else {
3285 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3286 	}
3287 
3288 	return nbuf;
3289 }
3290 
3291 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3292 
3293 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3294 {
3295 	qdf_nbuf_t ext_list;
3296 	qdf_frag_t p_frag;
3297 	uint32_t num_nr_frags;
3298 	uint32_t idx = 0;
3299 
3300 	if (qdf_unlikely(!nbuf))
3301 		return;
3302 
3303 	if (is_initial_mem_debug_disabled)
3304 		goto free_buf;
3305 
3306 	if (qdf_nbuf_get_users(nbuf) > 1)
3307 		goto free_buf;
3308 
3309 	/* Remove SKB from internal QDF tracking table */
3310 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3311 	qdf_net_buf_debug_delete_node(nbuf);
3312 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3313 
3314 	/* Take care to delete the debug entries for frags */
3315 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3316 
3317 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3318 
3319 	while (idx < num_nr_frags) {
3320 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3321 		if (qdf_likely(p_frag))
3322 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3323 		idx++;
3324 	}
3325 
3326 	/**
3327 	 * Take care to update the debug entries for frag_list and also
3328 	 * for the frags attached to frag_list
3329 	 */
3330 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3331 	while (ext_list) {
3332 		if (qdf_nbuf_get_users(ext_list) == 1) {
3333 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3334 			idx = 0;
3335 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3336 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3337 			while (idx < num_nr_frags) {
3338 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3339 				if (qdf_likely(p_frag))
3340 					qdf_frag_debug_refcount_dec(p_frag,
3341 								    func, line);
3342 				idx++;
3343 			}
3344 			qdf_net_buf_debug_delete_node(ext_list);
3345 		}
3346 
3347 		ext_list = qdf_nbuf_queue_next(ext_list);
3348 	}
3349 
3350 free_buf:
3351 	__qdf_nbuf_free(nbuf);
3352 }
3353 qdf_export_symbol(qdf_nbuf_free_debug);
3354 
3355 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3356 					const char *func, uint32_t line)
3357 {
3358 	struct sk_buff *skb;
3359 	int flags = GFP_KERNEL;
3360 
3361 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3362 		flags = GFP_ATOMIC;
3363 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3364 		/*
3365 		 * Observed that kcompactd burns out CPU to make order-3 page.
3366 		 *__netdev_alloc_skb has 4k page fallback option just in case of
3367 		 * failing high order page allocation so we don't need to be
3368 		 * hard. Make kcompactd rest in piece.
3369 		 */
3370 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3371 #endif
3372 	}
3373 
3374 	skb = __netdev_alloc_skb(NULL, size, flags);
3375 
3376 
3377 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3378 		if (qdf_likely(skb))
3379 			qdf_nbuf_count_inc(skb);
3380 	} else {
3381 		if (qdf_likely(skb)) {
3382 			qdf_nbuf_count_inc(skb);
3383 			qdf_net_buf_debug_add_node(skb, size, func, line);
3384 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
3385 		} else {
3386 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
3387 		}
3388 	}
3389 
3390 
3391 	return skb;
3392 }
3393 
3394 qdf_export_symbol(__qdf_nbuf_alloc_simple);
3395 
3396 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
3397 				uint32_t line)
3398 {
3399 	if (qdf_likely(nbuf)) {
3400 		if (is_initial_mem_debug_disabled) {
3401 			dev_kfree_skb_any(nbuf);
3402 		} else {
3403 			qdf_nbuf_free_debug(nbuf, func, line);
3404 		}
3405 	}
3406 }
3407 
3408 qdf_export_symbol(qdf_nbuf_free_debug_simple);
3409 
3410 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3411 {
3412 	uint32_t num_nr_frags;
3413 	uint32_t idx = 0;
3414 	qdf_nbuf_t ext_list;
3415 	qdf_frag_t p_frag;
3416 
3417 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3418 
3419 	if (is_initial_mem_debug_disabled)
3420 		return cloned_buf;
3421 
3422 	if (qdf_unlikely(!cloned_buf))
3423 		return NULL;
3424 
3425 	/* Take care to update the debug entries for frags */
3426 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3427 
3428 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3429 
3430 	while (idx < num_nr_frags) {
3431 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3432 		if (qdf_likely(p_frag))
3433 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3434 		idx++;
3435 	}
3436 
3437 	/* Take care to update debug entries for frags attached to frag_list */
3438 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3439 	while (ext_list) {
3440 		idx = 0;
3441 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3442 
3443 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3444 
3445 		while (idx < num_nr_frags) {
3446 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3447 			if (qdf_likely(p_frag))
3448 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3449 			idx++;
3450 		}
3451 		ext_list = qdf_nbuf_queue_next(ext_list);
3452 	}
3453 
3454 	/* Store SKB in internal QDF tracking table */
3455 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3456 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3457 
3458 	return cloned_buf;
3459 }
3460 qdf_export_symbol(qdf_nbuf_clone_debug);
3461 
3462 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3463 {
3464 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3465 
3466 	if (is_initial_mem_debug_disabled)
3467 		return copied_buf;
3468 
3469 	if (qdf_unlikely(!copied_buf))
3470 		return NULL;
3471 
3472 	/* Store SKB in internal QDF tracking table */
3473 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3474 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3475 
3476 	return copied_buf;
3477 }
3478 qdf_export_symbol(qdf_nbuf_copy_debug);
3479 
3480 qdf_nbuf_t
3481 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3482 			   const char *func, uint32_t line)
3483 {
3484 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3485 
3486 	if (qdf_unlikely(!copied_buf))
3487 		return NULL;
3488 
3489 	if (is_initial_mem_debug_disabled)
3490 		return copied_buf;
3491 
3492 	/* Store SKB in internal QDF tracking table */
3493 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3494 	qdf_nbuf_history_add(copied_buf, func, line,
3495 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3496 
3497 	return copied_buf;
3498 }
3499 
3500 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3501 
3502 qdf_nbuf_t
3503 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3504 		       uint32_t line_num)
3505 {
3506 	qdf_nbuf_t unshared_buf;
3507 	qdf_frag_t p_frag;
3508 	uint32_t num_nr_frags;
3509 	uint32_t idx = 0;
3510 	qdf_nbuf_t ext_list, next;
3511 
3512 	if (is_initial_mem_debug_disabled)
3513 		return __qdf_nbuf_unshare(buf);
3514 
3515 	/* Not a shared buffer, nothing to do */
3516 	if (!qdf_nbuf_is_cloned(buf))
3517 		return buf;
3518 
3519 	if (qdf_nbuf_get_users(buf) > 1)
3520 		goto unshare_buf;
3521 
3522 	/* Take care to delete the debug entries for frags */
3523 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3524 
3525 	while (idx < num_nr_frags) {
3526 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3527 		if (qdf_likely(p_frag))
3528 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3529 						    line_num);
3530 		idx++;
3531 	}
3532 
3533 	qdf_net_buf_debug_delete_node(buf);
3534 
3535 	 /* Take care of jumbo packet connected using frag_list and frags */
3536 	ext_list = qdf_nbuf_get_ext_list(buf);
3537 	while (ext_list) {
3538 		idx = 0;
3539 		next = qdf_nbuf_queue_next(ext_list);
3540 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3541 
3542 		if (qdf_nbuf_get_users(ext_list) > 1) {
3543 			ext_list = next;
3544 			continue;
3545 		}
3546 
3547 		while (idx < num_nr_frags) {
3548 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3549 			if (qdf_likely(p_frag))
3550 				qdf_frag_debug_refcount_dec(p_frag, func_name,
3551 							    line_num);
3552 			idx++;
3553 		}
3554 
3555 		qdf_net_buf_debug_delete_node(ext_list);
3556 		ext_list = next;
3557 	}
3558 
3559 unshare_buf:
3560 	unshared_buf = __qdf_nbuf_unshare(buf);
3561 
3562 	if (qdf_likely(unshared_buf))
3563 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3564 					   line_num);
3565 
3566 	return unshared_buf;
3567 }
3568 
3569 qdf_export_symbol(qdf_nbuf_unshare_debug);
3570 
3571 #endif /* NBUF_MEMORY_DEBUG */
3572 
3573 #if defined(FEATURE_TSO)
3574 
3575 /**
3576  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3577  *
3578  * @ethproto: ethernet type of the msdu
3579  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3580  * @l2_len: L2 length for the msdu
3581  * @eit_hdr: pointer to EIT header
3582  * @eit_hdr_len: EIT header length for the msdu
3583  * @eit_hdr_dma_map_addr: dma addr for EIT header
3584  * @tcphdr: pointer to tcp header
3585  * @ipv4_csum_en: ipv4 checksum enable
3586  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3587  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3588  * @ip_id: IP id
3589  * @tcp_seq_num: TCP sequence number
3590  *
3591  * This structure holds the TSO common info that is common
3592  * across all the TCP segments of the jumbo packet.
3593  */
3594 struct qdf_tso_cmn_seg_info_t {
3595 	uint16_t ethproto;
3596 	uint16_t ip_tcp_hdr_len;
3597 	uint16_t l2_len;
3598 	uint8_t *eit_hdr;
3599 	uint32_t eit_hdr_len;
3600 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3601 	struct tcphdr *tcphdr;
3602 	uint16_t ipv4_csum_en;
3603 	uint16_t tcp_ipv4_csum_en;
3604 	uint16_t tcp_ipv6_csum_en;
3605 	uint16_t ip_id;
3606 	uint32_t tcp_seq_num;
3607 };
3608 
3609 /**
3610  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
3611  *
3612  * @skb: network buffer
3613  *
3614  * Return: byte offset length of 8 bytes aligned.
3615  */
3616 #ifdef FIX_TXDMA_LIMITATION
3617 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3618 {
3619 	uint32_t eit_hdr_len;
3620 	uint8_t *eit_hdr;
3621 	uint8_t byte_8_align_offset;
3622 
3623 	eit_hdr = skb->data;
3624 	eit_hdr_len = (skb_transport_header(skb)
3625 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3626 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3627 	if (qdf_unlikely(byte_8_align_offset)) {
3628 		TSO_DEBUG("%pK,Len %d %d",
3629 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3630 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3631 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3632 				  __LINE__, skb->head, skb->data,
3633 				 byte_8_align_offset);
3634 			return 0;
3635 		}
3636 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3637 		qdf_mem_move(skb->data,
3638 			     skb->data + byte_8_align_offset,
3639 			     eit_hdr_len);
3640 		skb->len -= byte_8_align_offset;
3641 		skb->mac_header -= byte_8_align_offset;
3642 		skb->network_header -= byte_8_align_offset;
3643 		skb->transport_header -= byte_8_align_offset;
3644 	}
3645 	return byte_8_align_offset;
3646 }
3647 #else
3648 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3649 {
3650 	return 0;
3651 }
3652 #endif
3653 
3654 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
3655 void qdf_record_nbuf_nbytes(
3656 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
3657 {
3658 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
3659 }
3660 
3661 qdf_export_symbol(qdf_record_nbuf_nbytes);
3662 
3663 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
3664 
3665 /**
3666  * qdf_nbuf_tso_map_frag() - Map TSO segment
3667  * @osdev: qdf device handle
3668  * @tso_frag_vaddr: addr of tso fragment
3669  * @nbytes: number of bytes
3670  * @dir: direction
3671  *
3672  * Map TSO segment and for MCL record the amount of memory mapped
3673  *
3674  * Return: DMA address of mapped TSO fragment in success and
3675  * NULL in case of DMA mapping failure
3676  */
3677 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
3678 	qdf_device_t osdev, void *tso_frag_vaddr,
3679 	uint32_t nbytes, qdf_dma_dir_t dir)
3680 {
3681 	qdf_dma_addr_t tso_frag_paddr = 0;
3682 
3683 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
3684 					nbytes, __qdf_dma_dir_to_os(dir));
3685 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
3686 		qdf_err("DMA mapping error!");
3687 		qdf_assert_always(0);
3688 		return 0;
3689 	}
3690 	qdf_record_nbuf_nbytes(nbytes, dir, true);
3691 	return tso_frag_paddr;
3692 }
3693 
3694 /**
3695  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
3696  * @osdev: qdf device handle
3697  * @tso_frag_paddr: DMA addr of tso fragment
3698  * @dir: direction
3699  * @nbytes: number of bytes
3700  *
3701  * Unmap TSO segment and for MCL record the amount of memory mapped
3702  *
3703  * Return: None
3704  */
3705 static inline void qdf_nbuf_tso_unmap_frag(
3706 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
3707 	uint32_t nbytes, qdf_dma_dir_t dir)
3708 {
3709 	qdf_record_nbuf_nbytes(nbytes, dir, false);
3710 	dma_unmap_single(osdev->dev, tso_frag_paddr,
3711 			 nbytes, __qdf_dma_dir_to_os(dir));
3712 }
3713 
3714 /**
3715  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3716  * information
3717  * @osdev: qdf device handle
3718  * @skb: skb buffer
3719  * @tso_info: Parameters common to all segements
3720  *
3721  * Get the TSO information that is common across all the TCP
3722  * segments of the jumbo packet
3723  *
3724  * Return: 0 - success 1 - failure
3725  */
3726 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3727 			struct sk_buff *skb,
3728 			struct qdf_tso_cmn_seg_info_t *tso_info)
3729 {
3730 	/* Get ethernet type and ethernet header length */
3731 	tso_info->ethproto = vlan_get_protocol(skb);
3732 
3733 	/* Determine whether this is an IPv4 or IPv6 packet */
3734 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3735 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3736 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3737 
3738 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3739 		tso_info->ipv4_csum_en = 1;
3740 		tso_info->tcp_ipv4_csum_en = 1;
3741 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3742 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3743 				ipv4_hdr->protocol);
3744 			return 1;
3745 		}
3746 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3747 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3748 		tso_info->tcp_ipv6_csum_en = 1;
3749 	} else {
3750 		qdf_err("TSO: ethertype 0x%x is not supported!",
3751 			tso_info->ethproto);
3752 		return 1;
3753 	}
3754 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3755 	tso_info->tcphdr = tcp_hdr(skb);
3756 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3757 	/* get pointer to the ethernet + IP + TCP header and their length */
3758 	tso_info->eit_hdr = skb->data;
3759 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3760 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3761 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
3762 						osdev, tso_info->eit_hdr,
3763 						tso_info->eit_hdr_len,
3764 						QDF_DMA_TO_DEVICE);
3765 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
3766 		return 1;
3767 
3768 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3769 		/* inlcude IPv4 header length for IPV4 (total length) */
3770 		tso_info->ip_tcp_hdr_len =
3771 			tso_info->eit_hdr_len - tso_info->l2_len;
3772 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3773 		/* exclude IPv6 header length for IPv6 (payload length) */
3774 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3775 	}
3776 	/*
3777 	 * The length of the payload (application layer data) is added to
3778 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3779 	 * descriptor.
3780 	 */
3781 
3782 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3783 		tso_info->tcp_seq_num,
3784 		tso_info->eit_hdr_len,
3785 		tso_info->l2_len,
3786 		skb->len);
3787 	return 0;
3788 }
3789 
3790 
3791 /**
3792  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3793  *
3794  * @curr_seg: Segment whose contents are initialized
3795  * @tso_cmn_info: Parameters common to all segements
3796  *
3797  * Return: None
3798  */
3799 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3800 				struct qdf_tso_seg_elem_t *curr_seg,
3801 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3802 {
3803 	/* Initialize the flags to 0 */
3804 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3805 
3806 	/*
3807 	 * The following fields remain the same across all segments of
3808 	 * a jumbo packet
3809 	 */
3810 	curr_seg->seg.tso_flags.tso_enable = 1;
3811 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3812 		tso_cmn_info->ipv4_csum_en;
3813 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3814 		tso_cmn_info->tcp_ipv6_csum_en;
3815 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3816 		tso_cmn_info->tcp_ipv4_csum_en;
3817 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3818 
3819 	/* The following fields change for the segments */
3820 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3821 	tso_cmn_info->ip_id++;
3822 
3823 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3824 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3825 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3826 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3827 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3828 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3829 
3830 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3831 
3832 	/*
3833 	 * First fragment for each segment always contains the ethernet,
3834 	 * IP and TCP header
3835 	 */
3836 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3837 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3838 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3839 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3840 
3841 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3842 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3843 		   tso_cmn_info->eit_hdr_len,
3844 		   curr_seg->seg.tso_flags.tcp_seq_num,
3845 		   curr_seg->seg.total_len);
3846 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3847 }
3848 
3849 /**
3850  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3851  * into segments
3852  * @nbuf: network buffer to be segmented
3853  * @tso_info: This is the output. The information about the
3854  *           TSO segments will be populated within this.
3855  *
3856  * This function fragments a TCP jumbo packet into smaller
3857  * segments to be transmitted by the driver. It chains the TSO
3858  * segments created into a list.
3859  *
3860  * Return: number of TSO segments
3861  */
3862 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3863 		struct qdf_tso_info_t *tso_info)
3864 {
3865 	/* common across all segments */
3866 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3867 	/* segment specific */
3868 	void *tso_frag_vaddr;
3869 	qdf_dma_addr_t tso_frag_paddr = 0;
3870 	uint32_t num_seg = 0;
3871 	struct qdf_tso_seg_elem_t *curr_seg;
3872 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3873 	skb_frag_t *frag = NULL;
3874 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3875 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3876 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3877 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3878 	int j = 0; /* skb fragment index */
3879 	uint8_t byte_8_align_offset;
3880 
3881 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3882 	total_num_seg = tso_info->tso_num_seg_list;
3883 	curr_seg = tso_info->tso_seg_list;
3884 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3885 
3886 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3887 
3888 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3889 						skb, &tso_cmn_info))) {
3890 		qdf_warn("TSO: error getting common segment info");
3891 		return 0;
3892 	}
3893 
3894 	/* length of the first chunk of data in the skb */
3895 	skb_frag_len = skb_headlen(skb);
3896 
3897 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3898 	/* update the remaining skb fragment length and TSO segment length */
3899 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3900 	skb_proc -= tso_cmn_info.eit_hdr_len;
3901 
3902 	/* get the address to the next tso fragment */
3903 	tso_frag_vaddr = skb->data +
3904 			 tso_cmn_info.eit_hdr_len +
3905 			 byte_8_align_offset;
3906 	/* get the length of the next tso fragment */
3907 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3908 
3909 	if (tso_frag_len != 0) {
3910 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
3911 					osdev, tso_frag_vaddr, tso_frag_len,
3912 					QDF_DMA_TO_DEVICE);
3913 		if (qdf_unlikely(!tso_frag_paddr))
3914 			return 0;
3915 	}
3916 
3917 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3918 		__LINE__, skb_frag_len, tso_frag_len);
3919 	num_seg = tso_info->num_segs;
3920 	tso_info->num_segs = 0;
3921 	tso_info->is_tso = 1;
3922 
3923 	while (num_seg && curr_seg) {
3924 		int i = 1; /* tso fragment index */
3925 		uint8_t more_tso_frags = 1;
3926 
3927 		curr_seg->seg.num_frags = 0;
3928 		tso_info->num_segs++;
3929 		total_num_seg->num_seg.tso_cmn_num_seg++;
3930 
3931 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3932 						 &tso_cmn_info);
3933 
3934 		/* If TCP PSH flag is set, set it in the last or only segment */
3935 		if (num_seg == 1)
3936 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3937 
3938 		if (unlikely(skb_proc == 0))
3939 			return tso_info->num_segs;
3940 
3941 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3942 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3943 		/* frag len is added to ip_len in while loop below*/
3944 
3945 		curr_seg->seg.num_frags++;
3946 
3947 		while (more_tso_frags) {
3948 			if (tso_frag_len != 0) {
3949 				curr_seg->seg.tso_frags[i].vaddr =
3950 					tso_frag_vaddr;
3951 				curr_seg->seg.tso_frags[i].length =
3952 					tso_frag_len;
3953 				curr_seg->seg.total_len += tso_frag_len;
3954 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3955 				curr_seg->seg.num_frags++;
3956 				skb_proc = skb_proc - tso_frag_len;
3957 
3958 				/* increment the TCP sequence number */
3959 
3960 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3961 				curr_seg->seg.tso_frags[i].paddr =
3962 					tso_frag_paddr;
3963 
3964 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
3965 			}
3966 
3967 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3968 					__func__, __LINE__,
3969 					i,
3970 					tso_frag_len,
3971 					curr_seg->seg.total_len,
3972 					curr_seg->seg.tso_frags[i].vaddr);
3973 
3974 			/* if there is no more data left in the skb */
3975 			if (!skb_proc)
3976 				return tso_info->num_segs;
3977 
3978 			/* get the next payload fragment information */
3979 			/* check if there are more fragments in this segment */
3980 			if (tso_frag_len < tso_seg_size) {
3981 				more_tso_frags = 1;
3982 				if (tso_frag_len != 0) {
3983 					tso_seg_size = tso_seg_size -
3984 						tso_frag_len;
3985 					i++;
3986 					if (curr_seg->seg.num_frags ==
3987 								FRAG_NUM_MAX) {
3988 						more_tso_frags = 0;
3989 						/*
3990 						 * reset i and the tso
3991 						 * payload size
3992 						 */
3993 						i = 1;
3994 						tso_seg_size =
3995 							skb_shinfo(skb)->
3996 								gso_size;
3997 					}
3998 				}
3999 			} else {
4000 				more_tso_frags = 0;
4001 				/* reset i and the tso payload size */
4002 				i = 1;
4003 				tso_seg_size = skb_shinfo(skb)->gso_size;
4004 			}
4005 
4006 			/* if the next fragment is contiguous */
4007 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
4008 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4009 				skb_frag_len = skb_frag_len - tso_frag_len;
4010 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4011 
4012 			} else { /* the next fragment is not contiguous */
4013 				if (skb_shinfo(skb)->nr_frags == 0) {
4014 					qdf_info("TSO: nr_frags == 0!");
4015 					qdf_assert(0);
4016 					return 0;
4017 				}
4018 				if (j >= skb_shinfo(skb)->nr_frags) {
4019 					qdf_info("TSO: nr_frags %d j %d",
4020 						 skb_shinfo(skb)->nr_frags, j);
4021 					qdf_assert(0);
4022 					return 0;
4023 				}
4024 				frag = &skb_shinfo(skb)->frags[j];
4025 				skb_frag_len = skb_frag_size(frag);
4026 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4027 				tso_frag_vaddr = skb_frag_address_safe(frag);
4028 				j++;
4029 			}
4030 
4031 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4032 				__func__, __LINE__, skb_frag_len, tso_frag_len,
4033 				tso_seg_size);
4034 
4035 			if (!(tso_frag_vaddr)) {
4036 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
4037 						__func__);
4038 				return 0;
4039 			}
4040 
4041 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
4042 						osdev, tso_frag_vaddr,
4043 						tso_frag_len,
4044 						QDF_DMA_TO_DEVICE);
4045 			if (qdf_unlikely(!tso_frag_paddr))
4046 				return 0;
4047 		}
4048 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4049 				curr_seg->seg.tso_flags.tcp_seq_num);
4050 		num_seg--;
4051 		/* if TCP FIN flag was set, set it in the last segment */
4052 		if (!num_seg)
4053 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4054 
4055 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4056 		curr_seg = curr_seg->next;
4057 	}
4058 	return tso_info->num_segs;
4059 }
4060 qdf_export_symbol(__qdf_nbuf_get_tso_info);
4061 
4062 /**
4063  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
4064  *
4065  * @osdev: qdf device handle
4066  * @tso_seg: TSO segment element to be unmapped
4067  * @is_last_seg: whether this is last tso seg or not
4068  *
4069  * Return: none
4070  */
4071 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
4072 			  struct qdf_tso_seg_elem_t *tso_seg,
4073 			  bool is_last_seg)
4074 {
4075 	uint32_t num_frags = 0;
4076 
4077 	if (tso_seg->seg.num_frags > 0)
4078 		num_frags = tso_seg->seg.num_frags - 1;
4079 
4080 	/*Num of frags in a tso seg cannot be less than 2 */
4081 	if (num_frags < 1) {
4082 		/*
4083 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
4084 		 * this may happen when qdf_nbuf_get_tso_info failed,
4085 		 * do dma unmap for the 0th frag in this seg.
4086 		 */
4087 		if (is_last_seg && tso_seg->seg.num_frags == 1)
4088 			goto last_seg_free_first_frag;
4089 
4090 		qdf_assert(0);
4091 		qdf_err("ERROR: num of frags in a tso segment is %d",
4092 			(num_frags + 1));
4093 		return;
4094 	}
4095 
4096 	while (num_frags) {
4097 		/*Do dma unmap the tso seg except the 0th frag */
4098 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
4099 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4100 				num_frags);
4101 			qdf_assert(0);
4102 			return;
4103 		}
4104 		qdf_nbuf_tso_unmap_frag(
4105 			osdev,
4106 			tso_seg->seg.tso_frags[num_frags].paddr,
4107 			tso_seg->seg.tso_frags[num_frags].length,
4108 			QDF_DMA_TO_DEVICE);
4109 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
4110 		num_frags--;
4111 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4112 	}
4113 
4114 last_seg_free_first_frag:
4115 	if (is_last_seg) {
4116 		/*Do dma unmap for the tso seg 0th frag */
4117 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
4118 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4119 			qdf_assert(0);
4120 			return;
4121 		}
4122 		qdf_nbuf_tso_unmap_frag(osdev,
4123 					tso_seg->seg.tso_frags[0].paddr,
4124 					tso_seg->seg.tso_frags[0].length,
4125 					QDF_DMA_TO_DEVICE);
4126 		tso_seg->seg.tso_frags[0].paddr = 0;
4127 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4128 	}
4129 }
4130 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
4131 
4132 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4133 {
4134 	size_t packet_len;
4135 
4136 	packet_len = skb->len -
4137 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4138 		 tcp_hdrlen(skb));
4139 
4140 	return packet_len;
4141 }
4142 
4143 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
4144 
4145 /**
4146  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
4147  * into segments
4148  * @nbuf:   network buffer to be segmented
4149  * @tso_info:  This is the output. The information about the
4150  *      TSO segments will be populated within this.
4151  *
4152  * This function fragments a TCP jumbo packet into smaller
4153  * segments to be transmitted by the driver. It chains the TSO
4154  * segments created into a list.
4155  *
4156  * Return: 0 - success, 1 - failure
4157  */
4158 #ifndef BUILD_X86
4159 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4160 {
4161 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4162 	uint32_t remainder, num_segs = 0;
4163 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4164 	uint8_t frags_per_tso = 0;
4165 	uint32_t skb_frag_len = 0;
4166 	uint32_t eit_hdr_len = (skb_transport_header(skb)
4167 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4168 	skb_frag_t *frag = NULL;
4169 	int j = 0;
4170 	uint32_t temp_num_seg = 0;
4171 
4172 	/* length of the first chunk of data in the skb minus eit header*/
4173 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4174 
4175 	/* Calculate num of segs for skb's first chunk of data*/
4176 	remainder = skb_frag_len % tso_seg_size;
4177 	num_segs = skb_frag_len / tso_seg_size;
4178 	/**
4179 	 * Remainder non-zero and nr_frags zero implies end of skb data.
4180 	 * In that case, one more tso seg is required to accommodate
4181 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
4182 	 * then remaining data will be accomodated while doing the calculation
4183 	 * for nr_frags data. Hence, frags_per_tso++.
4184 	 */
4185 	if (remainder) {
4186 		if (!skb_nr_frags)
4187 			num_segs++;
4188 		else
4189 			frags_per_tso++;
4190 	}
4191 
4192 	while (skb_nr_frags) {
4193 		if (j >= skb_shinfo(skb)->nr_frags) {
4194 			qdf_info("TSO: nr_frags %d j %d",
4195 				 skb_shinfo(skb)->nr_frags, j);
4196 			qdf_assert(0);
4197 			return 0;
4198 		}
4199 		/**
4200 		 * Calculate the number of tso seg for nr_frags data:
4201 		 * Get the length of each frag in skb_frag_len, add to
4202 		 * remainder.Get the number of segments by dividing it to
4203 		 * tso_seg_size and calculate the new remainder.
4204 		 * Decrement the nr_frags value and keep
4205 		 * looping all the skb_fragments.
4206 		 */
4207 		frag = &skb_shinfo(skb)->frags[j];
4208 		skb_frag_len = skb_frag_size(frag);
4209 		temp_num_seg = num_segs;
4210 		remainder += skb_frag_len;
4211 		num_segs += remainder / tso_seg_size;
4212 		remainder = remainder % tso_seg_size;
4213 		skb_nr_frags--;
4214 		if (remainder) {
4215 			if (num_segs > temp_num_seg)
4216 				frags_per_tso = 0;
4217 			/**
4218 			 * increment the tso per frags whenever remainder is
4219 			 * positive. If frags_per_tso reaches the (max-1),
4220 			 * [First frags always have EIT header, therefore max-1]
4221 			 * increment the num_segs as no more data can be
4222 			 * accomodated in the curr tso seg. Reset the remainder
4223 			 * and frags per tso and keep looping.
4224 			 */
4225 			frags_per_tso++;
4226 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4227 				num_segs++;
4228 				frags_per_tso = 0;
4229 				remainder = 0;
4230 			}
4231 			/**
4232 			 * If this is the last skb frag and still remainder is
4233 			 * non-zero(frags_per_tso is not reached to the max-1)
4234 			 * then increment the num_segs to take care of the
4235 			 * remaining length.
4236 			 */
4237 			if (!skb_nr_frags && remainder) {
4238 				num_segs++;
4239 				frags_per_tso = 0;
4240 			}
4241 		} else {
4242 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4243 			frags_per_tso = 0;
4244 		}
4245 		j++;
4246 	}
4247 
4248 	return num_segs;
4249 }
4250 #elif !defined(QCA_WIFI_QCN9000)
4251 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4252 {
4253 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4254 	skb_frag_t *frag = NULL;
4255 
4256 	/*
4257 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
4258 	 * region which cannot be accessed by Target
4259 	 */
4260 	if (virt_to_phys(skb->data) < 0x50000040) {
4261 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4262 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4263 				virt_to_phys(skb->data));
4264 		goto fail;
4265 
4266 	}
4267 
4268 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4269 		frag = &skb_shinfo(skb)->frags[i];
4270 
4271 		if (!frag)
4272 			goto fail;
4273 
4274 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4275 			goto fail;
4276 	}
4277 
4278 
4279 	gso_size = skb_shinfo(skb)->gso_size;
4280 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4281 			+ tcp_hdrlen(skb));
4282 	while (tmp_len) {
4283 		num_segs++;
4284 		if (tmp_len > gso_size)
4285 			tmp_len -= gso_size;
4286 		else
4287 			break;
4288 	}
4289 
4290 	return num_segs;
4291 
4292 	/*
4293 	 * Do not free this frame, just do socket level accounting
4294 	 * so that this is not reused.
4295 	 */
4296 fail:
4297 	if (skb->sk)
4298 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4299 
4300 	return 0;
4301 }
4302 #else
4303 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4304 {
4305 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4306 	skb_frag_t *frag = NULL;
4307 
4308 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4309 		frag = &skb_shinfo(skb)->frags[i];
4310 
4311 		if (!frag)
4312 			goto fail;
4313 	}
4314 
4315 	gso_size = skb_shinfo(skb)->gso_size;
4316 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4317 			+ tcp_hdrlen(skb));
4318 	while (tmp_len) {
4319 		num_segs++;
4320 		if (tmp_len > gso_size)
4321 			tmp_len -= gso_size;
4322 		else
4323 			break;
4324 	}
4325 
4326 	return num_segs;
4327 
4328 	/*
4329 	 * Do not free this frame, just do socket level accounting
4330 	 * so that this is not reused.
4331 	 */
4332 fail:
4333 	if (skb->sk)
4334 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4335 
4336 	return 0;
4337 }
4338 #endif
4339 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
4340 
4341 #endif /* FEATURE_TSO */
4342 
4343 /**
4344  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
4345  *
4346  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
4347  *
4348  * Return: N/A
4349  */
4350 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4351 			  uint32_t *lo, uint32_t *hi)
4352 {
4353 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4354 		*lo = lower_32_bits(dmaaddr);
4355 		*hi = upper_32_bits(dmaaddr);
4356 	} else {
4357 		*lo = dmaaddr;
4358 		*hi = 0;
4359 	}
4360 }
4361 
4362 qdf_export_symbol(__qdf_dmaaddr_to_32s);
4363 
4364 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4365 {
4366 	qdf_nbuf_users_inc(&skb->users);
4367 	return skb;
4368 }
4369 qdf_export_symbol(__qdf_nbuf_inc_users);
4370 
4371 int __qdf_nbuf_get_users(struct sk_buff *skb)
4372 {
4373 	return qdf_nbuf_users_read(&skb->users);
4374 }
4375 qdf_export_symbol(__qdf_nbuf_get_users);
4376 
4377 /**
4378  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
4379  * @skb: sk_buff handle
4380  *
4381  * Return: none
4382  */
4383 
4384 void __qdf_nbuf_ref(struct sk_buff *skb)
4385 {
4386 	skb_get(skb);
4387 }
4388 qdf_export_symbol(__qdf_nbuf_ref);
4389 
4390 /**
4391  * __qdf_nbuf_shared() - Check whether the buffer is shared
4392  *  @skb: sk_buff buffer
4393  *
4394  *  Return: true if more than one person has a reference to this buffer.
4395  */
4396 int __qdf_nbuf_shared(struct sk_buff *skb)
4397 {
4398 	return skb_shared(skb);
4399 }
4400 qdf_export_symbol(__qdf_nbuf_shared);
4401 
4402 /**
4403  * __qdf_nbuf_dmamap_create() - create a DMA map.
4404  * @osdev: qdf device handle
4405  * @dmap: dma map handle
4406  *
4407  * This can later be used to map networking buffers. They :
4408  * - need space in adf_drv's software descriptor
4409  * - are typically created during adf_drv_create
4410  * - need to be created before any API(qdf_nbuf_map) that uses them
4411  *
4412  * Return: QDF STATUS
4413  */
4414 QDF_STATUS
4415 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4416 {
4417 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4418 	/*
4419 	 * driver can tell its SG capablity, it must be handled.
4420 	 * Bounce buffers if they are there
4421 	 */
4422 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4423 	if (!(*dmap))
4424 		error = QDF_STATUS_E_NOMEM;
4425 
4426 	return error;
4427 }
4428 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4429 /**
4430  * __qdf_nbuf_dmamap_destroy() - delete a dma map
4431  * @osdev: qdf device handle
4432  * @dmap: dma map handle
4433  *
4434  * Return: none
4435  */
4436 void
4437 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4438 {
4439 	kfree(dmap);
4440 }
4441 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
4442 
4443 /**
4444  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
4445  * @osdev: os device
4446  * @skb: skb handle
4447  * @dir: dma direction
4448  * @nbytes: number of bytes to be mapped
4449  *
4450  * Return: QDF_STATUS
4451  */
4452 #ifdef QDF_OS_DEBUG
4453 QDF_STATUS
4454 __qdf_nbuf_map_nbytes(
4455 	qdf_device_t osdev,
4456 	struct sk_buff *skb,
4457 	qdf_dma_dir_t dir,
4458 	int nbytes)
4459 {
4460 	struct skb_shared_info  *sh = skb_shinfo(skb);
4461 
4462 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4463 
4464 	/*
4465 	 * Assume there's only a single fragment.
4466 	 * To support multiple fragments, it would be necessary to change
4467 	 * adf_nbuf_t to be a separate object that stores meta-info
4468 	 * (including the bus address for each fragment) and a pointer
4469 	 * to the underlying sk_buff.
4470 	 */
4471 	qdf_assert(sh->nr_frags == 0);
4472 
4473 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4474 }
4475 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4476 #else
4477 QDF_STATUS
4478 __qdf_nbuf_map_nbytes(
4479 	qdf_device_t osdev,
4480 	struct sk_buff *skb,
4481 	qdf_dma_dir_t dir,
4482 	int nbytes)
4483 {
4484 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4485 }
4486 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4487 #endif
4488 /**
4489  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4490  * @osdev: OS device
4491  * @skb: skb handle
4492  * @dir: direction
4493  * @nbytes: number of bytes
4494  *
4495  * Return: none
4496  */
4497 void
4498 __qdf_nbuf_unmap_nbytes(
4499 	qdf_device_t osdev,
4500 	struct sk_buff *skb,
4501 	qdf_dma_dir_t dir,
4502 	int nbytes)
4503 {
4504 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4505 
4506 	/*
4507 	 * Assume there's a single fragment.
4508 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4509 	 */
4510 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4511 }
4512 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4513 
4514 /**
4515  * __qdf_nbuf_dma_map_info() - return the dma map info
4516  * @bmap: dma map
4517  * @sg: dma map info
4518  *
4519  * Return: none
4520  */
4521 void
4522 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4523 {
4524 	qdf_assert(bmap->mapped);
4525 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4526 
4527 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4528 			sizeof(struct __qdf_segment));
4529 	sg->nsegs = bmap->nsegs;
4530 }
4531 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4532 /**
4533  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4534  *			specified by the index
4535  * @skb: sk buff
4536  * @sg: scatter/gather list of all the frags
4537  *
4538  * Return: none
4539  */
4540 #if defined(__QDF_SUPPORT_FRAG_MEM)
4541 void
4542 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4543 {
4544 	qdf_assert(skb);
4545 	sg->sg_segs[0].vaddr = skb->data;
4546 	sg->sg_segs[0].len   = skb->len;
4547 	sg->nsegs            = 1;
4548 
4549 	for (int i = 1; i <= sh->nr_frags; i++) {
4550 		skb_frag_t    *f        = &sh->frags[i - 1];
4551 
4552 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
4553 			f->page_offset);
4554 		sg->sg_segs[i].len      = f->size;
4555 
4556 		qdf_assert(i < QDF_MAX_SGLIST);
4557 	}
4558 	sg->nsegs += i;
4559 
4560 }
4561 qdf_export_symbol(__qdf_nbuf_frag_info);
4562 #else
4563 #ifdef QDF_OS_DEBUG
4564 void
4565 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4566 {
4567 
4568 	struct skb_shared_info  *sh = skb_shinfo(skb);
4569 
4570 	qdf_assert(skb);
4571 	sg->sg_segs[0].vaddr = skb->data;
4572 	sg->sg_segs[0].len   = skb->len;
4573 	sg->nsegs            = 1;
4574 
4575 	qdf_assert(sh->nr_frags == 0);
4576 }
4577 qdf_export_symbol(__qdf_nbuf_frag_info);
4578 #else
4579 void
4580 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4581 {
4582 	sg->sg_segs[0].vaddr = skb->data;
4583 	sg->sg_segs[0].len   = skb->len;
4584 	sg->nsegs            = 1;
4585 }
4586 qdf_export_symbol(__qdf_nbuf_frag_info);
4587 #endif
4588 #endif
4589 /**
4590  * __qdf_nbuf_get_frag_size() - get frag size
4591  * @nbuf: sk buffer
4592  * @cur_frag: current frag
4593  *
4594  * Return: frag size
4595  */
4596 uint32_t
4597 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4598 {
4599 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4600 	const skb_frag_t *frag = sh->frags + cur_frag;
4601 
4602 	return skb_frag_size(frag);
4603 }
4604 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4605 
4606 /**
4607  * __qdf_nbuf_frag_map() - dma map frag
4608  * @osdev: os device
4609  * @nbuf: sk buff
4610  * @offset: offset
4611  * @dir: direction
4612  * @cur_frag: current fragment
4613  *
4614  * Return: QDF status
4615  */
4616 #ifdef A_SIMOS_DEVHOST
4617 QDF_STATUS __qdf_nbuf_frag_map(
4618 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4619 	int offset, qdf_dma_dir_t dir, int cur_frag)
4620 {
4621 	int32_t paddr, frag_len;
4622 
4623 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4624 	return QDF_STATUS_SUCCESS;
4625 }
4626 qdf_export_symbol(__qdf_nbuf_frag_map);
4627 #else
4628 QDF_STATUS __qdf_nbuf_frag_map(
4629 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4630 	int offset, qdf_dma_dir_t dir, int cur_frag)
4631 {
4632 	dma_addr_t paddr, frag_len;
4633 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4634 	const skb_frag_t *frag = sh->frags + cur_frag;
4635 
4636 	frag_len = skb_frag_size(frag);
4637 
4638 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4639 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4640 					__qdf_dma_dir_to_os(dir));
4641 	return dma_mapping_error(osdev->dev, paddr) ?
4642 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4643 }
4644 qdf_export_symbol(__qdf_nbuf_frag_map);
4645 #endif
4646 /**
4647  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4648  * @dmap: dma map
4649  * @cb: callback
4650  * @arg: argument
4651  *
4652  * Return: none
4653  */
4654 void
4655 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4656 {
4657 	return;
4658 }
4659 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4660 
4661 
4662 /**
4663  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4664  * @osdev: os device
4665  * @buf: sk buff
4666  * @dir: direction
4667  *
4668  * Return: none
4669  */
4670 #if defined(A_SIMOS_DEVHOST)
4671 static void __qdf_nbuf_sync_single_for_cpu(
4672 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4673 {
4674 	return;
4675 }
4676 #else
4677 static void __qdf_nbuf_sync_single_for_cpu(
4678 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4679 {
4680 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4681 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4682 		return;
4683 	}
4684 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4685 		skb_end_offset(buf) - skb_headroom(buf),
4686 		__qdf_dma_dir_to_os(dir));
4687 }
4688 #endif
4689 /**
4690  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4691  * @osdev: os device
4692  * @skb: sk buff
4693  * @dir: direction
4694  *
4695  * Return: none
4696  */
4697 void
4698 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4699 	struct sk_buff *skb, qdf_dma_dir_t dir)
4700 {
4701 	qdf_assert(
4702 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4703 
4704 	/*
4705 	 * Assume there's a single fragment.
4706 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4707 	 */
4708 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4709 }
4710 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4711 
4712 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4713 /**
4714  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4715  * @rx_status: Pointer to rx_status.
4716  * @rtap_buf: Buf to which VHT info has to be updated.
4717  * @rtap_len: Current length of radiotap buffer
4718  *
4719  * Return: Length of radiotap after VHT flags updated.
4720  */
4721 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4722 					struct mon_rx_status *rx_status,
4723 					int8_t *rtap_buf,
4724 					uint32_t rtap_len)
4725 {
4726 	uint16_t vht_flags = 0;
4727 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4728 
4729 	rtap_len = qdf_align(rtap_len, 2);
4730 
4731 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4732 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4733 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4734 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4735 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4736 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4737 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4738 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4739 	rtap_len += 2;
4740 
4741 	rtap_buf[rtap_len] |=
4742 		(rx_status->is_stbc ?
4743 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4744 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4745 		(rx_status->ldpc ?
4746 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4747 		(rx_status->beamformed ?
4748 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4749 	rtap_len += 1;
4750 
4751 	if (!rx_user_status) {
4752 		switch (rx_status->vht_flag_values2) {
4753 		case IEEE80211_RADIOTAP_VHT_BW_20:
4754 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4755 			break;
4756 		case IEEE80211_RADIOTAP_VHT_BW_40:
4757 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4758 			break;
4759 		case IEEE80211_RADIOTAP_VHT_BW_80:
4760 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4761 			break;
4762 		case IEEE80211_RADIOTAP_VHT_BW_160:
4763 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4764 			break;
4765 		}
4766 		rtap_len += 1;
4767 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4768 		rtap_len += 1;
4769 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4770 		rtap_len += 1;
4771 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4772 		rtap_len += 1;
4773 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4774 		rtap_len += 1;
4775 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4776 		rtap_len += 1;
4777 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4778 		rtap_len += 1;
4779 		put_unaligned_le16(rx_status->vht_flag_values6,
4780 				   &rtap_buf[rtap_len]);
4781 		rtap_len += 2;
4782 	} else {
4783 		switch (rx_user_status->vht_flag_values2) {
4784 		case IEEE80211_RADIOTAP_VHT_BW_20:
4785 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4786 			break;
4787 		case IEEE80211_RADIOTAP_VHT_BW_40:
4788 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4789 			break;
4790 		case IEEE80211_RADIOTAP_VHT_BW_80:
4791 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4792 			break;
4793 		case IEEE80211_RADIOTAP_VHT_BW_160:
4794 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4795 			break;
4796 		}
4797 		rtap_len += 1;
4798 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
4799 		rtap_len += 1;
4800 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
4801 		rtap_len += 1;
4802 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
4803 		rtap_len += 1;
4804 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
4805 		rtap_len += 1;
4806 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
4807 		rtap_len += 1;
4808 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
4809 		rtap_len += 1;
4810 		put_unaligned_le16(rx_user_status->vht_flag_values6,
4811 				   &rtap_buf[rtap_len]);
4812 		rtap_len += 2;
4813 	}
4814 
4815 	return rtap_len;
4816 }
4817 
4818 /**
4819  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4820  * @rx_status: Pointer to rx_status.
4821  * @rtap_buf: buffer to which radiotap has to be updated
4822  * @rtap_len: radiotap length
4823  *
4824  * API update high-efficiency (11ax) fields in the radiotap header
4825  *
4826  * Return: length of rtap_len updated.
4827  */
4828 static unsigned int
4829 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4830 				     int8_t *rtap_buf, uint32_t rtap_len)
4831 {
4832 	/*
4833 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4834 	 * Enable all "known" HE radiotap flags for now
4835 	 */
4836 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4837 
4838 	rtap_len = qdf_align(rtap_len, 2);
4839 
4840 	if (!rx_user_status) {
4841 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4842 		rtap_len += 2;
4843 
4844 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4845 		rtap_len += 2;
4846 
4847 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4848 		rtap_len += 2;
4849 
4850 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4851 		rtap_len += 2;
4852 
4853 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4854 		rtap_len += 2;
4855 
4856 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4857 		rtap_len += 2;
4858 		qdf_rl_debug("he data %x %x %x %x %x %x",
4859 			     rx_status->he_data1,
4860 			     rx_status->he_data2, rx_status->he_data3,
4861 			     rx_status->he_data4, rx_status->he_data5,
4862 			     rx_status->he_data6);
4863 	} else {
4864 		put_unaligned_le16(rx_user_status->he_data1,
4865 				   &rtap_buf[rtap_len]);
4866 		rtap_len += 2;
4867 
4868 		put_unaligned_le16(rx_user_status->he_data2,
4869 				   &rtap_buf[rtap_len]);
4870 		rtap_len += 2;
4871 
4872 		put_unaligned_le16(rx_user_status->he_data3,
4873 				   &rtap_buf[rtap_len]);
4874 		rtap_len += 2;
4875 
4876 		put_unaligned_le16(rx_user_status->he_data4,
4877 				   &rtap_buf[rtap_len]);
4878 		rtap_len += 2;
4879 
4880 		put_unaligned_le16(rx_user_status->he_data5,
4881 				   &rtap_buf[rtap_len]);
4882 		rtap_len += 2;
4883 
4884 		put_unaligned_le16(rx_user_status->he_data6,
4885 				   &rtap_buf[rtap_len]);
4886 		rtap_len += 2;
4887 		qdf_rl_debug("he data %x %x %x %x %x %x",
4888 			     rx_user_status->he_data1,
4889 			     rx_user_status->he_data2, rx_user_status->he_data3,
4890 			     rx_user_status->he_data4, rx_user_status->he_data5,
4891 			     rx_user_status->he_data6);
4892 	}
4893 
4894 	return rtap_len;
4895 }
4896 
4897 
4898 /**
4899  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4900  * @rx_status: Pointer to rx_status.
4901  * @rtap_buf: buffer to which radiotap has to be updated
4902  * @rtap_len: radiotap length
4903  *
4904  * API update HE-MU fields in the radiotap header
4905  *
4906  * Return: length of rtap_len updated.
4907  */
4908 static unsigned int
4909 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4910 				     int8_t *rtap_buf, uint32_t rtap_len)
4911 {
4912 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4913 
4914 	rtap_len = qdf_align(rtap_len, 2);
4915 
4916 	/*
4917 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4918 	 * Enable all "known" he-mu radiotap flags for now
4919 	 */
4920 
4921 	if (!rx_user_status) {
4922 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4923 		rtap_len += 2;
4924 
4925 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4926 		rtap_len += 2;
4927 
4928 		rtap_buf[rtap_len] = rx_status->he_RU[0];
4929 		rtap_len += 1;
4930 
4931 		rtap_buf[rtap_len] = rx_status->he_RU[1];
4932 		rtap_len += 1;
4933 
4934 		rtap_buf[rtap_len] = rx_status->he_RU[2];
4935 		rtap_len += 1;
4936 
4937 		rtap_buf[rtap_len] = rx_status->he_RU[3];
4938 		rtap_len += 1;
4939 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4940 			  rx_status->he_flags1,
4941 			  rx_status->he_flags2, rx_status->he_RU[0],
4942 			  rx_status->he_RU[1], rx_status->he_RU[2],
4943 			  rx_status->he_RU[3]);
4944 	} else {
4945 		put_unaligned_le16(rx_user_status->he_flags1,
4946 				   &rtap_buf[rtap_len]);
4947 		rtap_len += 2;
4948 
4949 		put_unaligned_le16(rx_user_status->he_flags2,
4950 				   &rtap_buf[rtap_len]);
4951 		rtap_len += 2;
4952 
4953 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
4954 		rtap_len += 1;
4955 
4956 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
4957 		rtap_len += 1;
4958 
4959 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
4960 		rtap_len += 1;
4961 
4962 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
4963 		rtap_len += 1;
4964 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4965 			  rx_user_status->he_flags1,
4966 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
4967 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
4968 			  rx_user_status->he_RU[3]);
4969 	}
4970 
4971 	return rtap_len;
4972 }
4973 
4974 /**
4975  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4976  * @rx_status: Pointer to rx_status.
4977  * @rtap_buf: buffer to which radiotap has to be updated
4978  * @rtap_len: radiotap length
4979  *
4980  * API update he-mu-other fields in the radiotap header
4981  *
4982  * Return: length of rtap_len updated.
4983  */
4984 static unsigned int
4985 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4986 				     int8_t *rtap_buf, uint32_t rtap_len)
4987 {
4988 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4989 
4990 	rtap_len = qdf_align(rtap_len, 2);
4991 
4992 	/*
4993 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4994 	 * Enable all "known" he-mu-other radiotap flags for now
4995 	 */
4996 	if (!rx_user_status) {
4997 		put_unaligned_le16(rx_status->he_per_user_1,
4998 				   &rtap_buf[rtap_len]);
4999 		rtap_len += 2;
5000 
5001 		put_unaligned_le16(rx_status->he_per_user_2,
5002 				   &rtap_buf[rtap_len]);
5003 		rtap_len += 2;
5004 
5005 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
5006 		rtap_len += 1;
5007 
5008 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
5009 		rtap_len += 1;
5010 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5011 			  rx_status->he_per_user_1,
5012 			  rx_status->he_per_user_2,
5013 			  rx_status->he_per_user_position,
5014 			  rx_status->he_per_user_known);
5015 	} else {
5016 		put_unaligned_le16(rx_user_status->he_per_user_1,
5017 				   &rtap_buf[rtap_len]);
5018 		rtap_len += 2;
5019 
5020 		put_unaligned_le16(rx_user_status->he_per_user_2,
5021 				   &rtap_buf[rtap_len]);
5022 		rtap_len += 2;
5023 
5024 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5025 		rtap_len += 1;
5026 
5027 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5028 		rtap_len += 1;
5029 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5030 			  rx_user_status->he_per_user_1,
5031 			  rx_user_status->he_per_user_2,
5032 			  rx_user_status->he_per_user_position,
5033 			  rx_user_status->he_per_user_known);
5034 	}
5035 
5036 	return rtap_len;
5037 }
5038 
5039 /**
5040  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5041  *						from rx_status
5042  * @rx_status: Pointer to rx_status.
5043  * @rtap_buf: buffer to which radiotap has to be updated
5044  * @rtap_len: radiotap length
5045  *
5046  * API update Extra High Throughput (11be) fields in the radiotap header
5047  *
5048  * Return: length of rtap_len updated.
5049  */
5050 static unsigned int
5051 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5052 				    int8_t *rtap_buf, uint32_t rtap_len)
5053 {
5054 	/*
5055 	 * IEEE80211_RADIOTAP_USIG:
5056 	 *		u32, u32, u32
5057 	 */
5058 	rtap_len = qdf_align(rtap_len, 4);
5059 
5060 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5061 	rtap_len += 4;
5062 
5063 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5064 	rtap_len += 4;
5065 
5066 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5067 	rtap_len += 4;
5068 
5069 	qdf_rl_debug("U-SIG data %x %x %x",
5070 		     rx_status->usig_common, rx_status->usig_value,
5071 		     rx_status->usig_mask);
5072 
5073 	return rtap_len;
5074 }
5075 
5076 /**
5077  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
5078  *					from rx_status
5079  * @rx_status: Pointer to rx_status.
5080  * @rtap_buf: buffer to which radiotap has to be updated
5081  * @rtap_len: radiotap length
5082  *
5083  * API update Extra High Throughput (11be) fields in the radiotap header
5084  *
5085  * Return: length of rtap_len updated.
5086  */
5087 static unsigned int
5088 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5089 				   int8_t *rtap_buf, uint32_t rtap_len)
5090 {
5091 	uint32_t user;
5092 
5093 	/*
5094 	 * IEEE80211_RADIOTAP_EHT:
5095 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5096 	 */
5097 	rtap_len = qdf_align(rtap_len, 4);
5098 
5099 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5100 	rtap_len += 4;
5101 
5102 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5103 	rtap_len += 4;
5104 
5105 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5106 	rtap_len += 4;
5107 
5108 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5109 	rtap_len += 4;
5110 
5111 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5112 	rtap_len += 4;
5113 
5114 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5115 	rtap_len += 4;
5116 
5117 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5118 	rtap_len += 4;
5119 
5120 	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
5121 		put_unaligned_le32(rx_status->eht_user_info[user],
5122 				   &rtap_buf[rtap_len]);
5123 		rtap_len += 4;
5124 	}
5125 
5126 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5127 		     rx_status->eht_known, rx_status->eht_data[0],
5128 		     rx_status->eht_data[1], rx_status->eht_data[2],
5129 		     rx_status->eht_data[3], rx_status->eht_data[4],
5130 		     rx_status->eht_data[5]);
5131 
5132 	return rtap_len;
5133 }
5134 
5135 #define IEEE80211_RADIOTAP_TX_STATUS 0
5136 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5137 #define IEEE80211_RADIOTAP_EXTENSION2 2
5138 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5139 
5140 /**
5141  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5142  * @rx_status: Pointer to rx_status.
5143  * @rtap_buf: Buf to which AMPDU info has to be updated.
5144  * @rtap_len: Current length of radiotap buffer
5145  *
5146  * Return: Length of radiotap after AMPDU flags updated.
5147  */
5148 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5149 					struct mon_rx_status *rx_status,
5150 					uint8_t *rtap_buf,
5151 					uint32_t rtap_len)
5152 {
5153 	/*
5154 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5155 	 * First 32 bits of AMPDU represents the reference number
5156 	 */
5157 
5158 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5159 	uint16_t ampdu_flags = 0;
5160 	uint16_t ampdu_reserved_flags = 0;
5161 
5162 	rtap_len = qdf_align(rtap_len, 4);
5163 
5164 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5165 	rtap_len += 4;
5166 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5167 	rtap_len += 2;
5168 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5169 	rtap_len += 2;
5170 
5171 	return rtap_len;
5172 }
5173 
5174 #ifdef DP_MON_RSSI_IN_DBM
5175 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5176 (rx_status->rssi_comb)
5177 #else
5178 #ifdef QCA_RSSI_DB2DBM
5179 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5180 (((rx_status)->rssi_dbm_conv_support) ? \
5181 ((rx_status)->rssi_comb + (rx_status)->min_nf_dbm +\
5182 (rx_status)->rssi_temp_offset) : \
5183 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5184 #else
5185 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5186 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5187 #endif
5188 #endif
5189 
5190 /**
5191  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5192  * @rx_status: Pointer to rx_status.
5193  * @rtap_buf: Buf to which tx info has to be updated.
5194  * @rtap_len: Current length of radiotap buffer
5195  *
5196  * Return: Length of radiotap after tx flags updated.
5197  */
5198 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5199 						struct mon_rx_status *rx_status,
5200 						uint8_t *rtap_buf,
5201 						uint32_t rtap_len)
5202 {
5203 	/*
5204 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5205 	 */
5206 
5207 	uint16_t tx_flags = 0;
5208 
5209 	rtap_len = qdf_align(rtap_len, 2);
5210 
5211 	switch (rx_status->tx_status) {
5212 	case RADIOTAP_TX_STATUS_FAIL:
5213 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5214 		break;
5215 	case RADIOTAP_TX_STATUS_NOACK:
5216 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5217 		break;
5218 	}
5219 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5220 	rtap_len += 2;
5221 
5222 	return rtap_len;
5223 }
5224 
5225 /**
5226  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
5227  * @rx_status: Pointer to rx_status.
5228  * @nbuf:      nbuf pointer to which radiotap has to be updated
5229  * @headroom_sz: Available headroom size.
5230  *
5231  * Return: length of rtap_len updated.
5232  */
5233 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5234 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5235 {
5236 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5237 	struct ieee80211_radiotap_header *rthdr =
5238 		(struct ieee80211_radiotap_header *)rtap_buf;
5239 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5240 	uint32_t rtap_len = rtap_hdr_len;
5241 	uint8_t length = rtap_len;
5242 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5243 	struct qdf_radiotap_ext2 *rtap_ext2;
5244 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5245 
5246 	/* per user info */
5247 	qdf_le32_t *it_present;
5248 	uint32_t it_present_val;
5249 	bool radiotap_ext1_hdr_present = false;
5250 
5251 	it_present = &rthdr->it_present;
5252 
5253 	/* Adding Extended Header space */
5254 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5255 	    rx_status->usig_flags || rx_status->eht_flags) {
5256 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5257 		rtap_len = rtap_hdr_len;
5258 		radiotap_ext1_hdr_present = true;
5259 	}
5260 
5261 	length = rtap_len;
5262 
5263 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5264 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5265 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5266 	rtap_len += 8;
5267 
5268 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5269 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5270 
5271 	if (rx_status->rs_fcs_err)
5272 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5273 
5274 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5275 	rtap_len += 1;
5276 
5277 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5278 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5279 	    !rx_status->he_flags) {
5280 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5281 		rtap_buf[rtap_len] = rx_status->rate;
5282 	} else
5283 		rtap_buf[rtap_len] = 0;
5284 	rtap_len += 1;
5285 
5286 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5287 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5288 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5289 	rtap_len += 2;
5290 	/* Channel flags. */
5291 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5292 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5293 	else
5294 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5295 	if (rx_status->cck_flag)
5296 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5297 	if (rx_status->ofdm_flag)
5298 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5299 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5300 	rtap_len += 2;
5301 
5302 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5303 	 *					(dBm)
5304 	 */
5305 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5306 	/*
5307 	 * rssi_comb is int dB, need to convert it to dBm.
5308 	 * normalize value to noise floor of -96 dBm
5309 	 */
5310 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5311 	rtap_len += 1;
5312 
5313 	/* RX signal noise floor */
5314 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5315 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5316 	rtap_len += 1;
5317 
5318 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5319 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5320 	rtap_buf[rtap_len] = rx_status->nr_ant;
5321 	rtap_len += 1;
5322 
5323 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5324 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5325 		return 0;
5326 	}
5327 
5328 	/* update tx flags for pkt capture*/
5329 	if (rx_status->add_rtap_ext) {
5330 		rthdr->it_present |=
5331 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5332 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5333 							     rtap_buf,
5334 							     rtap_len);
5335 
5336 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5337 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5338 			return 0;
5339 		}
5340 	}
5341 
5342 	if (rx_status->ht_flags) {
5343 		length = rtap_len;
5344 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
5345 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5346 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5347 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5348 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5349 		rtap_len += 1;
5350 
5351 		if (rx_status->sgi)
5352 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5353 		if (rx_status->bw)
5354 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5355 		else
5356 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5357 		rtap_len += 1;
5358 
5359 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5360 		rtap_len += 1;
5361 
5362 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5363 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5364 			return 0;
5365 		}
5366 	}
5367 
5368 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5369 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5370 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5371 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5372 								rtap_buf,
5373 								rtap_len);
5374 	}
5375 
5376 	if (rx_status->vht_flags) {
5377 		length = rtap_len;
5378 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5379 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5380 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5381 								rtap_buf,
5382 								rtap_len);
5383 
5384 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5385 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5386 			return 0;
5387 		}
5388 	}
5389 
5390 	if (rx_status->he_flags) {
5391 		length = rtap_len;
5392 		/* IEEE80211_RADIOTAP_HE */
5393 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5394 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5395 								rtap_buf,
5396 								rtap_len);
5397 
5398 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5399 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5400 			return 0;
5401 		}
5402 	}
5403 
5404 	if (rx_status->he_mu_flags) {
5405 		length = rtap_len;
5406 		/* IEEE80211_RADIOTAP_HE-MU */
5407 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5408 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5409 								rtap_buf,
5410 								rtap_len);
5411 
5412 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5413 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5414 			return 0;
5415 		}
5416 	}
5417 
5418 	if (rx_status->he_mu_other_flags) {
5419 		length = rtap_len;
5420 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5421 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5422 		rtap_len =
5423 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5424 								rtap_buf,
5425 								rtap_len);
5426 
5427 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5428 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5429 			return 0;
5430 		}
5431 	}
5432 
5433 	rtap_len = qdf_align(rtap_len, 2);
5434 	/*
5435 	 * Radiotap Vendor Namespace
5436 	 */
5437 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5438 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5439 					(rtap_buf + rtap_len);
5440 	/*
5441 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5442 	 */
5443 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5444 	/*
5445 	 * Name space selector = 0
5446 	 * We only will have one namespace for now
5447 	 */
5448 	radiotap_vendor_ns_ath->hdr.selector = 0;
5449 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5450 					sizeof(*radiotap_vendor_ns_ath) -
5451 					sizeof(radiotap_vendor_ns_ath->hdr));
5452 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5453 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5454 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5455 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5456 				cpu_to_le32(rx_status->ppdu_timestamp);
5457 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5458 
5459 	/* Move to next it_present */
5460 	if (radiotap_ext1_hdr_present) {
5461 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5462 		put_unaligned_le32(it_present_val, it_present);
5463 		it_present_val = 0;
5464 		it_present++;
5465 	}
5466 
5467 	/* Add Extension to Radiotap Header & corresponding data */
5468 	if (rx_status->add_rtap_ext) {
5469 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5470 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5471 
5472 		rtap_buf[rtap_len] = rx_status->tx_status;
5473 		rtap_len += 1;
5474 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5475 		rtap_len += 1;
5476 	}
5477 
5478 	/* Add Extension2 to Radiotap Header */
5479 	if (rx_status->add_rtap_ext2) {
5480 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5481 
5482 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5483 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5484 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5485 		if (!rx_user_status) {
5486 			rtap_ext2->tid = rx_status->tid;
5487 			rtap_ext2->start_seq = rx_status->start_seq;
5488 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5489 				     rx_status->ba_bitmap,
5490 				     8 * (sizeof(uint32_t)));
5491 		} else {
5492 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5493 
5494 			/* set default bitmap sz if not set */
5495 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5496 			rtap_ext2->tid = rx_user_status->tid;
5497 			rtap_ext2->start_seq = rx_user_status->start_seq;
5498 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5499 				     rx_user_status->ba_bitmap,
5500 				     ba_bitmap_sz * (sizeof(uint32_t)));
5501 		}
5502 
5503 		rtap_len += sizeof(*rtap_ext2);
5504 	}
5505 
5506 	if (rx_status->usig_flags) {
5507 		length = rtap_len;
5508 		/* IEEE80211_RADIOTAP_USIG */
5509 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5510 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5511 							       rtap_buf,
5512 							       rtap_len);
5513 
5514 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5515 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5516 			return 0;
5517 		}
5518 	}
5519 
5520 	if (rx_status->eht_flags) {
5521 		length = rtap_len;
5522 		/* IEEE80211_RADIOTAP_EHT */
5523 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5524 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5525 							      rtap_buf,
5526 							      rtap_len);
5527 
5528 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5529 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5530 			return 0;
5531 		}
5532 	}
5533 
5534 	put_unaligned_le32(it_present_val, it_present);
5535 	rthdr->it_len = cpu_to_le16(rtap_len);
5536 
5537 	if (headroom_sz < rtap_len) {
5538 		qdf_debug("DEBUG: Not enough space to update radiotap");
5539 		return 0;
5540 	}
5541 
5542 	qdf_nbuf_push_head(nbuf, rtap_len);
5543 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5544 	return rtap_len;
5545 }
5546 #else
5547 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5548 					struct mon_rx_status *rx_status,
5549 					int8_t *rtap_buf,
5550 					uint32_t rtap_len)
5551 {
5552 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5553 	return 0;
5554 }
5555 
5556 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5557 				      int8_t *rtap_buf, uint32_t rtap_len)
5558 {
5559 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5560 	return 0;
5561 }
5562 
5563 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5564 					struct mon_rx_status *rx_status,
5565 					uint8_t *rtap_buf,
5566 					uint32_t rtap_len)
5567 {
5568 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5569 	return 0;
5570 }
5571 
5572 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5573 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5574 {
5575 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5576 	return 0;
5577 }
5578 #endif
5579 qdf_export_symbol(qdf_nbuf_update_radiotap);
5580 
5581 /**
5582  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
5583  * @cb_func_ptr: function pointer to the nbuf free callback
5584  *
5585  * This function registers a callback function for nbuf free.
5586  *
5587  * Return: none
5588  */
5589 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5590 {
5591 	nbuf_free_cb = cb_func_ptr;
5592 }
5593 
5594 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
5595 
5596 /**
5597  * qdf_nbuf_classify_pkt() - classify packet
5598  * @skb - sk buff
5599  *
5600  * Return: none
5601  */
5602 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5603 {
5604 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5605 
5606 	/* check destination mac address is broadcast/multicast */
5607 	if (is_broadcast_ether_addr((uint8_t *)eh))
5608 		QDF_NBUF_CB_SET_BCAST(skb);
5609 	else if (is_multicast_ether_addr((uint8_t *)eh))
5610 		QDF_NBUF_CB_SET_MCAST(skb);
5611 
5612 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5613 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5614 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5615 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5616 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5617 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5618 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5619 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5620 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5621 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5622 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5623 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5624 }
5625 qdf_export_symbol(qdf_nbuf_classify_pkt);
5626 
5627 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5628 {
5629 	qdf_nbuf_users_set(&nbuf->users, 1);
5630 	nbuf->data = nbuf->head + NET_SKB_PAD;
5631 	skb_reset_tail_pointer(nbuf);
5632 }
5633 qdf_export_symbol(__qdf_nbuf_init);
5634 
5635 #ifdef WLAN_FEATURE_FASTPATH
5636 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5637 {
5638 	qdf_nbuf_users_set(&nbuf->users, 1);
5639 	nbuf->data = nbuf->head + NET_SKB_PAD;
5640 	skb_reset_tail_pointer(nbuf);
5641 }
5642 qdf_export_symbol(qdf_nbuf_init_fast);
5643 #endif /* WLAN_FEATURE_FASTPATH */
5644 
5645 
5646 #ifdef QDF_NBUF_GLOBAL_COUNT
5647 /**
5648  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
5649  *
5650  * Return void
5651  */
5652 void __qdf_nbuf_mod_init(void)
5653 {
5654 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5655 	qdf_atomic_init(&nbuf_count);
5656 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5657 }
5658 
5659 /**
5660  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
5661  *
5662  * Return void
5663  */
5664 void __qdf_nbuf_mod_exit(void)
5665 {
5666 }
5667 #endif
5668 
5669 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5670 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5671 					    int offset)
5672 {
5673 	unsigned int frag_offset;
5674 	skb_frag_t *frag;
5675 
5676 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5677 		return QDF_STATUS_E_FAILURE;
5678 
5679 	frag = &skb_shinfo(nbuf)->frags[idx];
5680 	frag_offset = skb_frag_off(frag);
5681 
5682 	frag_offset += offset;
5683 	skb_frag_off_set(frag, frag_offset);
5684 
5685 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5686 
5687 	return QDF_STATUS_SUCCESS;
5688 }
5689 
5690 #else
5691 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5692 					    int offset)
5693 {
5694 	uint16_t frag_offset;
5695 	skb_frag_t *frag;
5696 
5697 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5698 		return QDF_STATUS_E_FAILURE;
5699 
5700 	frag = &skb_shinfo(nbuf)->frags[idx];
5701 	frag_offset = frag->page_offset;
5702 
5703 	frag_offset += offset;
5704 	frag->page_offset = frag_offset;
5705 
5706 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5707 
5708 	return QDF_STATUS_SUCCESS;
5709 }
5710 #endif
5711 
5712 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
5713 
5714 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
5715 			    uint16_t idx,
5716 			    uint16_t truesize)
5717 {
5718 	struct page *page;
5719 	uint16_t frag_len;
5720 
5721 	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
5722 
5723 	if (qdf_unlikely(!page))
5724 		return;
5725 
5726 	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
5727 	put_page(page);
5728 	nbuf->len -= frag_len;
5729 	nbuf->data_len -= frag_len;
5730 	nbuf->truesize -= truesize;
5731 	skb_shinfo(nbuf)->nr_frags--;
5732 }
5733 
5734 qdf_export_symbol(__qdf_nbuf_remove_frag);
5735 
5736 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5737 			    int offset, int frag_len,
5738 			    unsigned int truesize, bool take_frag_ref)
5739 {
5740 	struct page *page;
5741 	int frag_offset;
5742 	uint8_t nr_frag;
5743 
5744 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5745 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5746 
5747 	page = virt_to_head_page(buf);
5748 	frag_offset = buf - page_address(page);
5749 
5750 	skb_add_rx_frag(nbuf, nr_frag, page,
5751 			(frag_offset + offset),
5752 			frag_len, truesize);
5753 
5754 	if (unlikely(take_frag_ref)) {
5755 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5756 		skb_frag_ref(nbuf, nr_frag);
5757 	}
5758 }
5759 
5760 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
5761 
5762 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
5763 {
5764 	struct page *page;
5765 	skb_frag_t frag = {0};
5766 
5767 	page = virt_to_head_page(buf);
5768 	__skb_frag_set_page(&frag, page);
5769 
5770 	/*
5771 	 * since __skb_frag_ref() just use page to increase ref
5772 	 * we just decode page alone
5773 	 */
5774 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5775 	__skb_frag_ref(&frag);
5776 }
5777 
5778 qdf_export_symbol(__qdf_nbuf_ref_frag);
5779 
5780 #ifdef NBUF_FRAG_MEMORY_DEBUG
5781 
5782 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5783 						int offset, const char *func,
5784 						uint32_t line)
5785 {
5786 	QDF_STATUS result;
5787 	qdf_frag_t p_fragp, n_fragp;
5788 
5789 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5790 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5791 
5792 	if (qdf_likely(is_initial_mem_debug_disabled))
5793 		return result;
5794 
5795 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5796 
5797 	/*
5798 	 * Update frag address in frag debug tracker
5799 	 * when frag offset is successfully changed in skb
5800 	 */
5801 	if (result == QDF_STATUS_SUCCESS)
5802 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
5803 
5804 	return result;
5805 }
5806 
5807 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
5808 
5809 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
5810 				int offset, int frag_len,
5811 				unsigned int truesize, bool take_frag_ref,
5812 				const char *func, uint32_t line)
5813 {
5814 	qdf_frag_t fragp;
5815 	uint32_t num_nr_frags;
5816 
5817 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
5818 			       frag_len, truesize, take_frag_ref);
5819 
5820 	if (qdf_likely(is_initial_mem_debug_disabled))
5821 		return;
5822 
5823 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
5824 
5825 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5826 
5827 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
5828 
5829 	/* Update frag address in frag debug tracking table */
5830 	if (fragp != buf)
5831 		qdf_frag_debug_update_addr(buf, fragp, func, line);
5832 
5833 	/* Update frag refcount in frag debug tracking table */
5834 	qdf_frag_debug_refcount_inc(fragp, func, line);
5835 }
5836 
5837 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
5838 
5839 /**
5840  * qdf_nbuf_ref_frag_debug() - get frag reference
5841  * @buf: Frag pointer needs to be taken reference.
5842  *
5843  * return: void
5844  */
5845 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
5846 {
5847 	__qdf_nbuf_ref_frag(buf);
5848 
5849 	if (qdf_likely(is_initial_mem_debug_disabled))
5850 		return;
5851 
5852 	/* Update frag refcount in frag debug tracking table */
5853 	qdf_frag_debug_refcount_inc(buf, func, line);
5854 }
5855 
5856 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
5857 
5858 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
5859 				    uint32_t line)
5860 {
5861 	uint32_t num_nr_frags;
5862 	uint32_t idx = 0;
5863 	qdf_nbuf_t ext_list;
5864 	qdf_frag_t p_frag;
5865 
5866 	if (qdf_likely(is_initial_mem_debug_disabled))
5867 		return;
5868 
5869 	if (qdf_unlikely(!buf))
5870 		return;
5871 
5872 	/* Take care to update the refcount in the debug entries for frags */
5873 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5874 
5875 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5876 
5877 	while (idx < num_nr_frags) {
5878 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5879 		if (qdf_likely(p_frag))
5880 			qdf_frag_debug_refcount_inc(p_frag, func, line);
5881 		idx++;
5882 	}
5883 
5884 	/**
5885 	 * Take care to update the refcount in the debug entries for the
5886 	 * frags attached to frag_list
5887 	 */
5888 	ext_list = qdf_nbuf_get_ext_list(buf);
5889 	while (ext_list) {
5890 		idx = 0;
5891 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5892 
5893 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5894 
5895 		while (idx < num_nr_frags) {
5896 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5897 			if (qdf_likely(p_frag))
5898 				qdf_frag_debug_refcount_inc(p_frag, func, line);
5899 			idx++;
5900 		}
5901 		ext_list = qdf_nbuf_queue_next(ext_list);
5902 	}
5903 }
5904 
5905 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
5906 
5907 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
5908 				    uint32_t line)
5909 {
5910 	uint32_t num_nr_frags;
5911 	qdf_nbuf_t ext_list;
5912 	uint32_t idx = 0;
5913 	qdf_frag_t p_frag;
5914 
5915 	if (qdf_likely(is_initial_mem_debug_disabled))
5916 		return;
5917 
5918 	if (qdf_unlikely(!buf))
5919 		return;
5920 
5921 	/**
5922 	 * Decrement refcount for frag debug nodes only when last user
5923 	 * of nbuf calls this API so as to avoid decrementing refcount
5924 	 * on every call expect the last one in case where nbuf has multiple
5925 	 * users
5926 	 */
5927 	if (qdf_nbuf_get_users(buf) > 1)
5928 		return;
5929 
5930 	/* Take care to update the refcount in the debug entries for frags */
5931 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5932 
5933 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5934 
5935 	while (idx < num_nr_frags) {
5936 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5937 		if (qdf_likely(p_frag))
5938 			qdf_frag_debug_refcount_dec(p_frag, func, line);
5939 		idx++;
5940 	}
5941 
5942 	/* Take care to update debug entries for frags attached to frag_list */
5943 	ext_list = qdf_nbuf_get_ext_list(buf);
5944 	while (ext_list) {
5945 		if (qdf_nbuf_get_users(ext_list) == 1) {
5946 			idx = 0;
5947 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5948 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5949 			while (idx < num_nr_frags) {
5950 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5951 				if (qdf_likely(p_frag))
5952 					qdf_frag_debug_refcount_dec(p_frag,
5953 								    func, line);
5954 				idx++;
5955 			}
5956 		}
5957 		ext_list = qdf_nbuf_queue_next(ext_list);
5958 	}
5959 }
5960 
5961 qdf_export_symbol(qdf_net_buf_debug_release_frag);
5962 
5963 /**
5964  * qdf_nbuf_remove_frag_debug - Remove frag from nbuf
5965  * @nbuf: nbuf  where frag will be removed
5966  * @idx: frag index
5967  * @truesize: truesize of frag
5968  * @func: Caller function name
5969  * @line:  Caller function line no.
5970  *
5971  * Return: QDF_STATUS
5972  *
5973  */
5974 QDF_STATUS
5975 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
5976 			   uint16_t idx,
5977 			   uint16_t truesize,
5978 			   const char *func,
5979 			   uint32_t line)
5980 {
5981 	uint16_t num_frags;
5982 	qdf_frag_t frag;
5983 
5984 	if (qdf_unlikely(!nbuf))
5985 		return QDF_STATUS_E_INVAL;
5986 
5987 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
5988 	if (idx >= num_frags)
5989 		return QDF_STATUS_E_INVAL;
5990 
5991 	if (qdf_likely(is_initial_mem_debug_disabled)) {
5992 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
5993 		return QDF_STATUS_SUCCESS;
5994 	}
5995 
5996 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
5997 	if (qdf_likely(frag))
5998 		qdf_frag_debug_refcount_dec(frag, func, line);
5999 
6000 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6001 
6002 	return QDF_STATUS_SUCCESS;
6003 }
6004 
6005 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6006 
6007 #endif /* NBUF_FRAG_MEMORY_DEBUG */
6008 
6009 /**
6010  * qdf_get_nbuf_valid_frag() - Get nbuf to store frag
6011  * @nbuf: qdf_nbuf_t master nbuf
6012  *
6013  * Return: qdf_nbuf_t
6014  */
6015 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
6016 {
6017 	qdf_nbuf_t last_nbuf;
6018 	uint32_t num_frags;
6019 
6020 	if (qdf_unlikely(!nbuf))
6021 		return NULL;
6022 
6023 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6024 
6025 	/* Check nbuf has enough memory to store frag memory */
6026 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6027 		return nbuf;
6028 
6029 	if (!__qdf_nbuf_has_fraglist(nbuf))
6030 		return NULL;
6031 
6032 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
6033 	if (qdf_unlikely(!last_nbuf))
6034 		return NULL;
6035 
6036 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
6037 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6038 		return last_nbuf;
6039 
6040 	return NULL;
6041 }
6042 
6043 qdf_export_symbol(qdf_get_nbuf_valid_frag);
6044 
6045 /**
6046  * qdf_nbuf_add_frag_debug() - Add frag to nbuf
6047  * @osdev: Device handle
6048  * @buf: Frag pointer needs to be added in nbuf frag
6049  * @nbuf: qdf_nbuf_t where frag will be added
6050  * @offset: Offset in frag to be added to nbuf_frags
6051  * @frag_len: Frag length
6052  * @truesize: truesize
6053  * @take_frag_ref: Whether to take ref for frag or not
6054  *      This bool must be set as per below comdition:
6055  *      1. False: If this frag is being added in any nbuf
6056  *              for the first time after allocation
6057  *      2. True: If frag is already attached part of any
6058  *              nbuf
6059  * @minsize: Minimum size to allocate
6060  * @func: Caller function name
6061  * @line: Caller function line no.
6062  *
6063  * if number of frag exceed maximum frag array. A new nbuf is allocated
6064  * with minimum headroom and frag it added to that nbuf.
6065  * new nbuf is added as frag_list to the master nbuf.
6066  *
6067  * Return: QDF_STATUS
6068  */
6069 QDF_STATUS
6070 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6071 			qdf_nbuf_t nbuf, int offset,
6072 			int frag_len, unsigned int truesize,
6073 			bool take_frag_ref, unsigned int minsize,
6074 			const char *func, uint32_t line)
6075 {
6076 	qdf_nbuf_t cur_nbuf;
6077 	qdf_nbuf_t this_nbuf;
6078 
6079 	cur_nbuf = nbuf;
6080 	this_nbuf = nbuf;
6081 
6082 	if (qdf_unlikely(!frag_len || !buf)) {
6083 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6084 			     func, line,
6085 			     buf, frag_len);
6086 		return QDF_STATUS_E_INVAL;
6087 	}
6088 
6089 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6090 
6091 	if (this_nbuf) {
6092 		cur_nbuf = this_nbuf;
6093 	} else {
6094 		/* allocate a dummy mpdu buffer of 64 bytes headroom */
6095 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6096 		if (qdf_unlikely(!this_nbuf)) {
6097 			qdf_nofl_err("%s : %d no memory to allocate\n",
6098 				     func, line);
6099 			return QDF_STATUS_E_NOMEM;
6100 		}
6101 	}
6102 
6103 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6104 			     take_frag_ref);
6105 
6106 	if (this_nbuf != cur_nbuf) {
6107 		/* add new skb to frag list */
6108 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6109 					 qdf_nbuf_len(this_nbuf));
6110 	}
6111 
6112 	return QDF_STATUS_SUCCESS;
6113 }
6114 
6115 qdf_export_symbol(qdf_nbuf_add_frag_debug);
6116 
6117 #ifdef MEMORY_DEBUG
6118 void qdf_nbuf_acquire_track_lock(uint32_t index,
6119 				 unsigned long irq_flag)
6120 {
6121 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6122 			  irq_flag);
6123 }
6124 
6125 void qdf_nbuf_release_track_lock(uint32_t index,
6126 				 unsigned long irq_flag)
6127 {
6128 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6129 			       irq_flag);
6130 }
6131 
6132 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6133 {
6134 	return gp_qdf_net_buf_track_tbl[index];
6135 }
6136 #endif /* MEMORY_DEBUG */
6137 
6138 #ifdef ENHANCED_OS_ABSTRACTION
6139 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6140 {
6141 	__qdf_nbuf_set_timestamp(buf);
6142 }
6143 
6144 qdf_export_symbol(qdf_nbuf_set_timestamp);
6145 
6146 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6147 {
6148 	return __qdf_nbuf_get_timestamp(buf);
6149 }
6150 
6151 qdf_export_symbol(qdf_nbuf_get_timestamp);
6152 
6153 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6154 {
6155 	return __qdf_nbuf_get_timedelta_us(buf);
6156 }
6157 
6158 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6159 
6160 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6161 {
6162 	return __qdf_nbuf_get_timedelta_ms(buf);
6163 }
6164 
6165 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6166 
6167 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6168 {
6169 	return __qdf_nbuf_net_timedelta(t);
6170 }
6171 
6172 qdf_export_symbol(qdf_nbuf_net_timedelta);
6173 #endif
6174