xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define CHANNEL_FREQ_5150 5150
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
95 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 /* Packet Counter */
110 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
111 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
112 #ifdef QDF_NBUF_GLOBAL_COUNT
113 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
114 static qdf_atomic_t nbuf_count;
115 #endif
116 
117 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
118 static bool is_initial_mem_debug_disabled;
119 #endif
120 
121 /**
122  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
123  *
124  * Return: none
125  */
126 void qdf_nbuf_tx_desc_count_display(void)
127 {
128 	qdf_debug("Current Snapshot of the Driver:");
129 	qdf_debug("Data Packets:");
130 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
132 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
141 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
146 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
147 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
148 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
149 	qdf_debug("Mgmt Packets:");
150 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
161 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
162 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
163 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
164 }
165 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
166 
167 /**
168  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
169  * @packet_type   : packet type either mgmt/data
170  * @current_state : layer at which the packet currently present
171  *
172  * Return: none
173  */
174 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
175 			uint8_t current_state)
176 {
177 	switch (packet_type) {
178 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
179 		nbuf_tx_mgmt[current_state]++;
180 		break;
181 	case QDF_NBUF_TX_PKT_DATA_TRACK:
182 		nbuf_tx_data[current_state]++;
183 		break;
184 	default:
185 		break;
186 	}
187 }
188 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
189 
190 /**
191  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
192  *
193  * Return: none
194  */
195 void qdf_nbuf_tx_desc_count_clear(void)
196 {
197 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
198 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
199 }
200 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
201 
202 /**
203  * qdf_nbuf_set_state() - Updates the packet state
204  * @nbuf:            network buffer
205  * @current_state :  layer at which the packet currently is
206  *
207  * This function updates the packet state to the layer at which the packet
208  * currently is
209  *
210  * Return: none
211  */
212 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
213 {
214 	/*
215 	 * Only Mgmt, Data Packets are tracked. WMI messages
216 	 * such as scan commands are not tracked
217 	 */
218 	uint8_t packet_type;
219 
220 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
221 
222 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
223 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
224 		return;
225 	}
226 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
227 	qdf_nbuf_tx_desc_count_update(packet_type,
228 					current_state);
229 }
230 qdf_export_symbol(qdf_nbuf_set_state);
231 
232 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
233 /**
234  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
235  *
236  * This function starts the alloc fail replenish timer.
237  *
238  * Return: void
239  */
240 static void __qdf_nbuf_start_replenish_timer(void)
241 {
242 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
243 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
244 	    QDF_TIMER_STATE_RUNNING)
245 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
246 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
247 }
248 
249 /**
250  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
251  *
252  * This function stops the alloc fail replenish timer.
253  *
254  * Return: void
255  */
256 static void __qdf_nbuf_stop_replenish_timer(void)
257 {
258 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
259 		return;
260 
261 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
262 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
263 	    QDF_TIMER_STATE_RUNNING)
264 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
265 }
266 
267 /**
268  * qdf_replenish_expire_handler - Replenish expire handler
269  *
270  * This function triggers when the alloc fail replenish timer expires.
271  *
272  * Return: void
273  */
274 static void qdf_replenish_expire_handler(void *arg)
275 {
276 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
277 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
278 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
279 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
280 
281 		/* Error handling here */
282 	}
283 }
284 
285 /**
286  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
287  *
288  * This function initializes the nbuf alloc fail replenish timer.
289  *
290  * Return: void
291  */
292 void __qdf_nbuf_init_replenish_timer(void)
293 {
294 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
295 			  qdf_replenish_expire_handler, NULL);
296 }
297 
298 /**
299  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
300  *
301  * This function deinitializes the nbuf alloc fail replenish timer.
302  *
303  * Return: void
304  */
305 void __qdf_nbuf_deinit_replenish_timer(void)
306 {
307 	__qdf_nbuf_stop_replenish_timer();
308 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
309 }
310 #else
311 
312 static inline void __qdf_nbuf_start_replenish_timer(void) {}
313 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
314 #endif
315 
316 /* globals do not need to be initialized to NULL/0 */
317 qdf_nbuf_trace_update_t qdf_trace_update_cb;
318 qdf_nbuf_free_t nbuf_free_cb;
319 
320 #ifdef QDF_NBUF_GLOBAL_COUNT
321 
322 /**
323  * __qdf_nbuf_count_get() - get nbuf global count
324  *
325  * Return: nbuf global count
326  */
327 int __qdf_nbuf_count_get(void)
328 {
329 	return qdf_atomic_read(&nbuf_count);
330 }
331 qdf_export_symbol(__qdf_nbuf_count_get);
332 
333 /**
334  * __qdf_nbuf_count_inc() - increment nbuf global count
335  *
336  * @buf: sk buff
337  *
338  * Return: void
339  */
340 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
341 {
342 	int num_nbuf = 1;
343 	qdf_nbuf_t ext_list;
344 
345 	if (qdf_likely(is_initial_mem_debug_disabled))
346 		return;
347 
348 	ext_list = qdf_nbuf_get_ext_list(nbuf);
349 
350 	/* Take care to account for frag_list */
351 	while (ext_list) {
352 		++num_nbuf;
353 		ext_list = qdf_nbuf_queue_next(ext_list);
354 	}
355 
356 	qdf_atomic_add(num_nbuf, &nbuf_count);
357 }
358 qdf_export_symbol(__qdf_nbuf_count_inc);
359 
360 /**
361  * __qdf_nbuf_count_dec() - decrement nbuf global count
362  *
363  * @buf: sk buff
364  *
365  * Return: void
366  */
367 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
368 {
369 	qdf_nbuf_t ext_list;
370 	int num_nbuf;
371 
372 	if (qdf_likely(is_initial_mem_debug_disabled))
373 		return;
374 
375 	if (qdf_nbuf_get_users(nbuf) > 1)
376 		return;
377 
378 	num_nbuf = 1;
379 
380 	/* Take care to account for frag_list */
381 	ext_list = qdf_nbuf_get_ext_list(nbuf);
382 	while (ext_list) {
383 		if (qdf_nbuf_get_users(ext_list) == 1)
384 			++num_nbuf;
385 		ext_list = qdf_nbuf_queue_next(ext_list);
386 	}
387 
388 	qdf_atomic_sub(num_nbuf, &nbuf_count);
389 }
390 qdf_export_symbol(__qdf_nbuf_count_dec);
391 #endif
392 
393 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
394 	!defined(QCA_WIFI_QCN9000)
395 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
396 				 int align, int prio, const char *func,
397 				 uint32_t line)
398 {
399 	struct sk_buff *skb;
400 	unsigned long offset;
401 	uint32_t lowmem_alloc_tries = 0;
402 
403 	if (align)
404 		size += (align - 1);
405 
406 realloc:
407 	skb = dev_alloc_skb(size);
408 
409 	if (skb)
410 		goto skb_alloc;
411 
412 	skb = pld_nbuf_pre_alloc(size);
413 
414 	if (!skb) {
415 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
416 				size, func, line);
417 		return NULL;
418 	}
419 
420 skb_alloc:
421 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
422 	 * Though we are trying to reserve low memory upfront to prevent this,
423 	 * we sometimes see SKBs allocated from low memory.
424 	 */
425 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
426 		lowmem_alloc_tries++;
427 		if (lowmem_alloc_tries > 100) {
428 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
429 				     size, func, line);
430 			return NULL;
431 		} else {
432 			/* Not freeing to make sure it
433 			 * will not get allocated again
434 			 */
435 			goto realloc;
436 		}
437 	}
438 	memset(skb->cb, 0x0, sizeof(skb->cb));
439 
440 	/*
441 	 * The default is for netbuf fragments to be interpreted
442 	 * as wordstreams rather than bytestreams.
443 	 */
444 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
445 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
446 
447 	/*
448 	 * XXX:how about we reserve first then align
449 	 * Align & make sure that the tail & data are adjusted properly
450 	 */
451 
452 	if (align) {
453 		offset = ((unsigned long)skb->data) % align;
454 		if (offset)
455 			skb_reserve(skb, align - offset);
456 	}
457 
458 	/*
459 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
460 	 * pointer
461 	 */
462 	skb_reserve(skb, reserve);
463 	qdf_nbuf_count_inc(skb);
464 
465 	return skb;
466 }
467 #else
468 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
469 				 int align, int prio, const char *func,
470 				 uint32_t line)
471 {
472 	struct sk_buff *skb;
473 	unsigned long offset;
474 	int flags = GFP_KERNEL;
475 
476 	if (align)
477 		size += (align - 1);
478 
479 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
480 		flags = GFP_ATOMIC;
481 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
482 		/*
483 		 * Observed that kcompactd burns out CPU to make order-3 page.
484 		 *__netdev_alloc_skb has 4k page fallback option just in case of
485 		 * failing high order page allocation so we don't need to be
486 		 * hard. Make kcompactd rest in piece.
487 		 */
488 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
489 #endif
490 	}
491 
492 	skb = __netdev_alloc_skb(NULL, size, flags);
493 
494 	if (skb)
495 		goto skb_alloc;
496 
497 	skb = pld_nbuf_pre_alloc(size);
498 
499 	if (!skb) {
500 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
501 				size, func, line);
502 		__qdf_nbuf_start_replenish_timer();
503 		return NULL;
504 	} else {
505 		__qdf_nbuf_stop_replenish_timer();
506 	}
507 
508 skb_alloc:
509 	memset(skb->cb, 0x0, sizeof(skb->cb));
510 
511 	/*
512 	 * The default is for netbuf fragments to be interpreted
513 	 * as wordstreams rather than bytestreams.
514 	 */
515 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
516 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
517 
518 	/*
519 	 * XXX:how about we reserve first then align
520 	 * Align & make sure that the tail & data are adjusted properly
521 	 */
522 
523 	if (align) {
524 		offset = ((unsigned long)skb->data) % align;
525 		if (offset)
526 			skb_reserve(skb, align - offset);
527 	}
528 
529 	/*
530 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
531 	 * pointer
532 	 */
533 	skb_reserve(skb, reserve);
534 	qdf_nbuf_count_inc(skb);
535 
536 	return skb;
537 }
538 #endif
539 qdf_export_symbol(__qdf_nbuf_alloc);
540 
541 /**
542  * __qdf_nbuf_free() - free the nbuf its interrupt safe
543  * @skb: Pointer to network buffer
544  *
545  * Return: none
546  */
547 
548 void __qdf_nbuf_free(struct sk_buff *skb)
549 {
550 	if (pld_nbuf_pre_alloc_free(skb))
551 		return;
552 
553 	qdf_nbuf_count_dec(skb);
554 	qdf_mem_skb_dec(skb->truesize);
555 	if (nbuf_free_cb)
556 		nbuf_free_cb(skb);
557 	else
558 		dev_kfree_skb_any(skb);
559 }
560 
561 qdf_export_symbol(__qdf_nbuf_free);
562 
563 #ifdef NBUF_MEMORY_DEBUG
564 enum qdf_nbuf_event_type {
565 	QDF_NBUF_ALLOC,
566 	QDF_NBUF_ALLOC_CLONE,
567 	QDF_NBUF_ALLOC_COPY,
568 	QDF_NBUF_ALLOC_FAILURE,
569 	QDF_NBUF_FREE,
570 	QDF_NBUF_MAP,
571 	QDF_NBUF_UNMAP,
572 	QDF_NBUF_ALLOC_COPY_EXPAND,
573 };
574 
575 struct qdf_nbuf_event {
576 	qdf_nbuf_t nbuf;
577 	char func[QDF_MEM_FUNC_NAME_SIZE];
578 	uint32_t line;
579 	enum qdf_nbuf_event_type type;
580 	uint64_t timestamp;
581 };
582 
583 #define QDF_NBUF_HISTORY_SIZE 4096
584 static qdf_atomic_t qdf_nbuf_history_index;
585 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
586 
587 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
588 {
589 	int32_t next = qdf_atomic_inc_return(index);
590 
591 	if (next == size)
592 		qdf_atomic_sub(size, index);
593 
594 	return next % size;
595 }
596 
597 static void
598 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
599 		     enum qdf_nbuf_event_type type)
600 {
601 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
602 						   QDF_NBUF_HISTORY_SIZE);
603 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
604 
605 	event->nbuf = nbuf;
606 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
607 	event->line = line;
608 	event->type = type;
609 	event->timestamp = qdf_get_log_timestamp();
610 }
611 #endif /* NBUF_MEMORY_DEBUG */
612 
613 #ifdef NBUF_MAP_UNMAP_DEBUG
614 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
615 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
616 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
617 
618 static void qdf_nbuf_map_tracking_init(void)
619 {
620 	qdf_tracker_init(&qdf_nbuf_map_tracker);
621 }
622 
623 static void qdf_nbuf_map_tracking_deinit(void)
624 {
625 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
626 }
627 
628 static QDF_STATUS
629 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
630 {
631 	QDF_STATUS status;
632 
633 	if (is_initial_mem_debug_disabled)
634 		return QDF_STATUS_SUCCESS;
635 
636 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
637 	if (QDF_IS_STATUS_ERROR(status))
638 		return status;
639 
640 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
641 
642 	return QDF_STATUS_SUCCESS;
643 }
644 
645 static void
646 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
647 {
648 	if (is_initial_mem_debug_disabled)
649 		return;
650 
651 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
652 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
653 }
654 
655 void qdf_nbuf_map_check_for_leaks(void)
656 {
657 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
658 }
659 
660 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
661 			      qdf_nbuf_t buf,
662 			      qdf_dma_dir_t dir,
663 			      const char *func,
664 			      uint32_t line)
665 {
666 	QDF_STATUS status;
667 
668 	status = qdf_nbuf_track_map(buf, func, line);
669 	if (QDF_IS_STATUS_ERROR(status))
670 		return status;
671 
672 	status = __qdf_nbuf_map(osdev, buf, dir);
673 	if (QDF_IS_STATUS_ERROR(status))
674 		qdf_nbuf_untrack_map(buf, func, line);
675 	else
676 		qdf_net_buf_debug_update_map_node(buf, func, line);
677 
678 	return status;
679 }
680 
681 qdf_export_symbol(qdf_nbuf_map_debug);
682 
683 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
684 			  qdf_nbuf_t buf,
685 			  qdf_dma_dir_t dir,
686 			  const char *func,
687 			  uint32_t line)
688 {
689 	qdf_nbuf_untrack_map(buf, func, line);
690 	__qdf_nbuf_unmap_single(osdev, buf, dir);
691 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
692 }
693 
694 qdf_export_symbol(qdf_nbuf_unmap_debug);
695 
696 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
697 				     qdf_nbuf_t buf,
698 				     qdf_dma_dir_t dir,
699 				     const char *func,
700 				     uint32_t line)
701 {
702 	QDF_STATUS status;
703 
704 	status = qdf_nbuf_track_map(buf, func, line);
705 	if (QDF_IS_STATUS_ERROR(status))
706 		return status;
707 
708 	status = __qdf_nbuf_map_single(osdev, buf, dir);
709 	if (QDF_IS_STATUS_ERROR(status))
710 		qdf_nbuf_untrack_map(buf, func, line);
711 	else
712 		qdf_net_buf_debug_update_map_node(buf, func, line);
713 
714 	return status;
715 }
716 
717 qdf_export_symbol(qdf_nbuf_map_single_debug);
718 
719 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
720 				 qdf_nbuf_t buf,
721 				 qdf_dma_dir_t dir,
722 				 const char *func,
723 				 uint32_t line)
724 {
725 	qdf_nbuf_untrack_map(buf, func, line);
726 	__qdf_nbuf_unmap_single(osdev, buf, dir);
727 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
728 }
729 
730 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
731 
732 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
733 				     qdf_nbuf_t buf,
734 				     qdf_dma_dir_t dir,
735 				     int nbytes,
736 				     const char *func,
737 				     uint32_t line)
738 {
739 	QDF_STATUS status;
740 
741 	status = qdf_nbuf_track_map(buf, func, line);
742 	if (QDF_IS_STATUS_ERROR(status))
743 		return status;
744 
745 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
746 	if (QDF_IS_STATUS_ERROR(status))
747 		qdf_nbuf_untrack_map(buf, func, line);
748 	else
749 		qdf_net_buf_debug_update_map_node(buf, func, line);
750 
751 	return status;
752 }
753 
754 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
755 
756 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
757 				 qdf_nbuf_t buf,
758 				 qdf_dma_dir_t dir,
759 				 int nbytes,
760 				 const char *func,
761 				 uint32_t line)
762 {
763 	qdf_nbuf_untrack_map(buf, func, line);
764 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
765 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
766 }
767 
768 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
769 
770 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
771 					    qdf_nbuf_t buf,
772 					    qdf_dma_dir_t dir,
773 					    int nbytes,
774 					    const char *func,
775 					    uint32_t line)
776 {
777 	QDF_STATUS status;
778 
779 	status = qdf_nbuf_track_map(buf, func, line);
780 	if (QDF_IS_STATUS_ERROR(status))
781 		return status;
782 
783 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
784 	if (QDF_IS_STATUS_ERROR(status))
785 		qdf_nbuf_untrack_map(buf, func, line);
786 	else
787 		qdf_net_buf_debug_update_map_node(buf, func, line);
788 
789 	return status;
790 }
791 
792 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
793 
794 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
795 					qdf_nbuf_t buf,
796 					qdf_dma_dir_t dir,
797 					int nbytes,
798 					const char *func,
799 					uint32_t line)
800 {
801 	qdf_nbuf_untrack_map(buf, func, line);
802 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
803 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
804 }
805 
806 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
807 
808 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
809 					     const char *func,
810 					     uint32_t line)
811 {
812 	char map_func[QDF_TRACKER_FUNC_SIZE];
813 	uint32_t map_line;
814 
815 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
816 				&map_func, &map_line))
817 		return;
818 
819 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
820 			   func, line, map_func, map_line);
821 }
822 #else
823 static inline void qdf_nbuf_map_tracking_init(void)
824 {
825 }
826 
827 static inline void qdf_nbuf_map_tracking_deinit(void)
828 {
829 }
830 
831 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
832 						    const char *func,
833 						    uint32_t line)
834 {
835 }
836 #endif /* NBUF_MAP_UNMAP_DEBUG */
837 
838 /**
839  * __qdf_nbuf_map() - map a buffer to local bus address space
840  * @osdev: OS device
841  * @bmap: Bitmap
842  * @skb: Pointer to network buffer
843  * @dir: Direction
844  *
845  * Return: QDF_STATUS
846  */
847 #ifdef QDF_OS_DEBUG
848 QDF_STATUS
849 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
850 {
851 	struct skb_shared_info *sh = skb_shinfo(skb);
852 
853 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
854 			|| (dir == QDF_DMA_FROM_DEVICE));
855 
856 	/*
857 	 * Assume there's only a single fragment.
858 	 * To support multiple fragments, it would be necessary to change
859 	 * qdf_nbuf_t to be a separate object that stores meta-info
860 	 * (including the bus address for each fragment) and a pointer
861 	 * to the underlying sk_buff.
862 	 */
863 	qdf_assert(sh->nr_frags == 0);
864 
865 	return __qdf_nbuf_map_single(osdev, skb, dir);
866 }
867 qdf_export_symbol(__qdf_nbuf_map);
868 
869 #else
870 QDF_STATUS
871 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
872 {
873 	return __qdf_nbuf_map_single(osdev, skb, dir);
874 }
875 qdf_export_symbol(__qdf_nbuf_map);
876 #endif
877 /**
878  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
879  * @osdev: OS device
880  * @skb: Pointer to network buffer
881  * @dir: dma direction
882  *
883  * Return: none
884  */
885 void
886 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
887 			qdf_dma_dir_t dir)
888 {
889 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
890 		   || (dir == QDF_DMA_FROM_DEVICE));
891 
892 	/*
893 	 * Assume there's a single fragment.
894 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
895 	 */
896 	__qdf_nbuf_unmap_single(osdev, skb, dir);
897 }
898 qdf_export_symbol(__qdf_nbuf_unmap);
899 
900 /**
901  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
902  * @osdev: OS device
903  * @skb: Pointer to network buffer
904  * @dir: Direction
905  *
906  * Return: QDF_STATUS
907  */
908 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
909 QDF_STATUS
910 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
911 {
912 	qdf_dma_addr_t paddr;
913 
914 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
915 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
916 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
917 	return QDF_STATUS_SUCCESS;
918 }
919 qdf_export_symbol(__qdf_nbuf_map_single);
920 #else
921 QDF_STATUS
922 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
923 {
924 	qdf_dma_addr_t paddr;
925 
926 	/* assume that the OS only provides a single fragment */
927 	QDF_NBUF_CB_PADDR(buf) = paddr =
928 		dma_map_single(osdev->dev, buf->data,
929 				skb_end_pointer(buf) - buf->data,
930 				__qdf_dma_dir_to_os(dir));
931 	return dma_mapping_error(osdev->dev, paddr)
932 		? QDF_STATUS_E_FAILURE
933 		: QDF_STATUS_SUCCESS;
934 }
935 qdf_export_symbol(__qdf_nbuf_map_single);
936 #endif
937 /**
938  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
939  * @osdev: OS device
940  * @skb: Pointer to network buffer
941  * @dir: Direction
942  *
943  * Return: none
944  */
945 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
946 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
947 				qdf_dma_dir_t dir)
948 {
949 }
950 #else
951 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
952 					qdf_dma_dir_t dir)
953 {
954 	if (QDF_NBUF_CB_PADDR(buf))
955 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
956 			skb_end_pointer(buf) - buf->data,
957 			__qdf_dma_dir_to_os(dir));
958 }
959 #endif
960 qdf_export_symbol(__qdf_nbuf_unmap_single);
961 
962 /**
963  * __qdf_nbuf_set_rx_cksum() - set rx checksum
964  * @skb: Pointer to network buffer
965  * @cksum: Pointer to checksum value
966  *
967  * Return: QDF_STATUS
968  */
969 QDF_STATUS
970 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
971 {
972 	switch (cksum->l4_result) {
973 	case QDF_NBUF_RX_CKSUM_NONE:
974 		skb->ip_summed = CHECKSUM_NONE;
975 		break;
976 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
977 		skb->ip_summed = CHECKSUM_UNNECESSARY;
978 		break;
979 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
980 		skb->ip_summed = CHECKSUM_PARTIAL;
981 		skb->csum = cksum->val;
982 		break;
983 	default:
984 		pr_err("Unknown checksum type\n");
985 		qdf_assert(0);
986 		return QDF_STATUS_E_NOSUPPORT;
987 	}
988 	return QDF_STATUS_SUCCESS;
989 }
990 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
991 
992 /**
993  * __qdf_nbuf_get_tx_cksum() - get tx checksum
994  * @skb: Pointer to network buffer
995  *
996  * Return: TX checksum value
997  */
998 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
999 {
1000 	switch (skb->ip_summed) {
1001 	case CHECKSUM_NONE:
1002 		return QDF_NBUF_TX_CKSUM_NONE;
1003 	case CHECKSUM_PARTIAL:
1004 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1005 	case CHECKSUM_COMPLETE:
1006 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1007 	default:
1008 		return QDF_NBUF_TX_CKSUM_NONE;
1009 	}
1010 }
1011 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1012 
1013 /**
1014  * __qdf_nbuf_get_tid() - get tid
1015  * @skb: Pointer to network buffer
1016  *
1017  * Return: tid
1018  */
1019 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1020 {
1021 	return skb->priority;
1022 }
1023 qdf_export_symbol(__qdf_nbuf_get_tid);
1024 
1025 /**
1026  * __qdf_nbuf_set_tid() - set tid
1027  * @skb: Pointer to network buffer
1028  *
1029  * Return: none
1030  */
1031 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1032 {
1033 	skb->priority = tid;
1034 }
1035 qdf_export_symbol(__qdf_nbuf_set_tid);
1036 
1037 /**
1038  * __qdf_nbuf_set_tid() - set tid
1039  * @skb: Pointer to network buffer
1040  *
1041  * Return: none
1042  */
1043 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1044 {
1045 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1046 }
1047 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1048 
1049 /**
1050  * __qdf_nbuf_reg_trace_cb() - register trace callback
1051  * @cb_func_ptr: Pointer to trace callback function
1052  *
1053  * Return: none
1054  */
1055 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1056 {
1057 	qdf_trace_update_cb = cb_func_ptr;
1058 }
1059 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1060 
1061 /**
1062  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1063  *              of DHCP packet.
1064  * @data: Pointer to DHCP packet data buffer
1065  *
1066  * This func. returns the subtype of DHCP packet.
1067  *
1068  * Return: subtype of the DHCP packet.
1069  */
1070 enum qdf_proto_subtype
1071 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1072 {
1073 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1074 
1075 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1076 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1077 					QDF_DHCP_OPTION53_LENGTH)) {
1078 
1079 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1080 		case QDF_DHCP_DISCOVER:
1081 			subtype = QDF_PROTO_DHCP_DISCOVER;
1082 			break;
1083 		case QDF_DHCP_REQUEST:
1084 			subtype = QDF_PROTO_DHCP_REQUEST;
1085 			break;
1086 		case QDF_DHCP_OFFER:
1087 			subtype = QDF_PROTO_DHCP_OFFER;
1088 			break;
1089 		case QDF_DHCP_ACK:
1090 			subtype = QDF_PROTO_DHCP_ACK;
1091 			break;
1092 		case QDF_DHCP_NAK:
1093 			subtype = QDF_PROTO_DHCP_NACK;
1094 			break;
1095 		case QDF_DHCP_RELEASE:
1096 			subtype = QDF_PROTO_DHCP_RELEASE;
1097 			break;
1098 		case QDF_DHCP_INFORM:
1099 			subtype = QDF_PROTO_DHCP_INFORM;
1100 			break;
1101 		case QDF_DHCP_DECLINE:
1102 			subtype = QDF_PROTO_DHCP_DECLINE;
1103 			break;
1104 		default:
1105 			break;
1106 		}
1107 	}
1108 
1109 	return subtype;
1110 }
1111 
1112 /**
1113  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1114  *            of EAPOL packet.
1115  * @data: Pointer to EAPOL packet data buffer
1116  *
1117  * This func. returns the subtype of EAPOL packet.
1118  *
1119  * Return: subtype of the EAPOL packet.
1120  */
1121 enum qdf_proto_subtype
1122 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1123 {
1124 	uint16_t eapol_key_info;
1125 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1126 	uint16_t mask;
1127 
1128 	eapol_key_info = (uint16_t)(*(uint16_t *)
1129 			(data + EAPOL_KEY_INFO_OFFSET));
1130 
1131 	mask = eapol_key_info & EAPOL_MASK;
1132 	switch (mask) {
1133 	case EAPOL_M1_BIT_MASK:
1134 		subtype = QDF_PROTO_EAPOL_M1;
1135 		break;
1136 	case EAPOL_M2_BIT_MASK:
1137 		subtype = QDF_PROTO_EAPOL_M2;
1138 		break;
1139 	case EAPOL_M3_BIT_MASK:
1140 		subtype = QDF_PROTO_EAPOL_M3;
1141 		break;
1142 	case EAPOL_M4_BIT_MASK:
1143 		subtype = QDF_PROTO_EAPOL_M4;
1144 		break;
1145 	default:
1146 		break;
1147 	}
1148 
1149 	return subtype;
1150 }
1151 
1152 /**
1153  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1154  *            of ARP packet.
1155  * @data: Pointer to ARP packet data buffer
1156  *
1157  * This func. returns the subtype of ARP packet.
1158  *
1159  * Return: subtype of the ARP packet.
1160  */
1161 enum qdf_proto_subtype
1162 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1163 {
1164 	uint16_t subtype;
1165 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1166 
1167 	subtype = (uint16_t)(*(uint16_t *)
1168 			(data + ARP_SUB_TYPE_OFFSET));
1169 
1170 	switch (QDF_SWAP_U16(subtype)) {
1171 	case ARP_REQUEST:
1172 		proto_subtype = QDF_PROTO_ARP_REQ;
1173 		break;
1174 	case ARP_RESPONSE:
1175 		proto_subtype = QDF_PROTO_ARP_RES;
1176 		break;
1177 	default:
1178 		break;
1179 	}
1180 
1181 	return proto_subtype;
1182 }
1183 
1184 /**
1185  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1186  *            of IPV4 ICMP packet.
1187  * @data: Pointer to IPV4 ICMP packet data buffer
1188  *
1189  * This func. returns the subtype of ICMP packet.
1190  *
1191  * Return: subtype of the ICMP packet.
1192  */
1193 enum qdf_proto_subtype
1194 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1195 {
1196 	uint8_t subtype;
1197 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1198 
1199 	subtype = (uint8_t)(*(uint8_t *)
1200 			(data + ICMP_SUBTYPE_OFFSET));
1201 
1202 	switch (subtype) {
1203 	case ICMP_REQUEST:
1204 		proto_subtype = QDF_PROTO_ICMP_REQ;
1205 		break;
1206 	case ICMP_RESPONSE:
1207 		proto_subtype = QDF_PROTO_ICMP_RES;
1208 		break;
1209 	default:
1210 		break;
1211 	}
1212 
1213 	return proto_subtype;
1214 }
1215 
1216 /**
1217  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1218  *            of IPV6 ICMPV6 packet.
1219  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1220  *
1221  * This func. returns the subtype of ICMPV6 packet.
1222  *
1223  * Return: subtype of the ICMPV6 packet.
1224  */
1225 enum qdf_proto_subtype
1226 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1227 {
1228 	uint8_t subtype;
1229 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1230 
1231 	subtype = (uint8_t)(*(uint8_t *)
1232 			(data + ICMPV6_SUBTYPE_OFFSET));
1233 
1234 	switch (subtype) {
1235 	case ICMPV6_REQUEST:
1236 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1237 		break;
1238 	case ICMPV6_RESPONSE:
1239 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1240 		break;
1241 	case ICMPV6_RS:
1242 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1243 		break;
1244 	case ICMPV6_RA:
1245 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1246 		break;
1247 	case ICMPV6_NS:
1248 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1249 		break;
1250 	case ICMPV6_NA:
1251 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1252 		break;
1253 	default:
1254 		break;
1255 	}
1256 
1257 	return proto_subtype;
1258 }
1259 
1260 /**
1261  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1262  *            of IPV4 packet.
1263  * @data: Pointer to IPV4 packet data buffer
1264  *
1265  * This func. returns the proto type of IPV4 packet.
1266  *
1267  * Return: proto type of IPV4 packet.
1268  */
1269 uint8_t
1270 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1271 {
1272 	uint8_t proto_type;
1273 
1274 	proto_type = (uint8_t)(*(uint8_t *)(data +
1275 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1276 	return proto_type;
1277 }
1278 
1279 /**
1280  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1281  *            of IPV6 packet.
1282  * @data: Pointer to IPV6 packet data buffer
1283  *
1284  * This func. returns the proto type of IPV6 packet.
1285  *
1286  * Return: proto type of IPV6 packet.
1287  */
1288 uint8_t
1289 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1290 {
1291 	uint8_t proto_type;
1292 
1293 	proto_type = (uint8_t)(*(uint8_t *)(data +
1294 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1295 	return proto_type;
1296 }
1297 
1298 /**
1299  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1300  * @data: Pointer to network data
1301  *
1302  * This api is for Tx packets.
1303  *
1304  * Return: true if packet is ipv4 packet
1305  *	   false otherwise
1306  */
1307 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1308 {
1309 	uint16_t ether_type;
1310 
1311 	ether_type = (uint16_t)(*(uint16_t *)(data +
1312 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1313 
1314 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1315 		return true;
1316 	else
1317 		return false;
1318 }
1319 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1320 
1321 /**
1322  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1323  * @data: Pointer to network data buffer
1324  *
1325  * This api is for ipv4 packet.
1326  *
1327  * Return: true if packet is DHCP packet
1328  *	   false otherwise
1329  */
1330 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1331 {
1332 	uint16_t sport;
1333 	uint16_t dport;
1334 
1335 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1336 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1337 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1338 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1339 					 sizeof(uint16_t)));
1340 
1341 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1342 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1343 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1344 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1345 		return true;
1346 	else
1347 		return false;
1348 }
1349 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1350 
1351 /**
1352  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1353  * @data: Pointer to network data buffer
1354  *
1355  * This api is for ipv4 packet.
1356  *
1357  * Return: true if packet is EAPOL packet
1358  *	   false otherwise.
1359  */
1360 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1361 {
1362 	uint16_t ether_type;
1363 
1364 	ether_type = (uint16_t)(*(uint16_t *)(data +
1365 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1366 
1367 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1368 		return true;
1369 	else
1370 		return false;
1371 }
1372 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1373 
1374 /**
1375  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1376  * @skb: Pointer to network buffer
1377  *
1378  * This api is for ipv4 packet.
1379  *
1380  * Return: true if packet is WAPI packet
1381  *	   false otherwise.
1382  */
1383 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1384 {
1385 	uint16_t ether_type;
1386 
1387 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1388 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1389 
1390 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1391 		return true;
1392 	else
1393 		return false;
1394 }
1395 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1396 
1397 /**
1398  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1399  * @skb: Pointer to network buffer
1400  *
1401  * This api is for ipv4 packet.
1402  *
1403  * Return: true if packet is tdls packet
1404  *	   false otherwise.
1405  */
1406 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1407 {
1408 	uint16_t ether_type;
1409 
1410 	ether_type = *(uint16_t *)(skb->data +
1411 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1412 
1413 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1414 		return true;
1415 	else
1416 		return false;
1417 }
1418 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1419 
1420 /**
1421  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1422  * @data: Pointer to network data buffer
1423  *
1424  * This api is for ipv4 packet.
1425  *
1426  * Return: true if packet is ARP packet
1427  *	   false otherwise.
1428  */
1429 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1430 {
1431 	uint16_t ether_type;
1432 
1433 	ether_type = (uint16_t)(*(uint16_t *)(data +
1434 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1435 
1436 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1437 		return true;
1438 	else
1439 		return false;
1440 }
1441 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1442 
1443 /**
1444  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1445  * @data: Pointer to network data buffer
1446  *
1447  * This api is for ipv4 packet.
1448  *
1449  * Return: true if packet is ARP request
1450  *	   false otherwise.
1451  */
1452 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1453 {
1454 	uint16_t op_code;
1455 
1456 	op_code = (uint16_t)(*(uint16_t *)(data +
1457 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1458 
1459 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1460 		return true;
1461 	return false;
1462 }
1463 
1464 /**
1465  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1466  * @data: Pointer to network data buffer
1467  *
1468  * This api is for ipv4 packet.
1469  *
1470  * Return: true if packet is ARP response
1471  *	   false otherwise.
1472  */
1473 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1474 {
1475 	uint16_t op_code;
1476 
1477 	op_code = (uint16_t)(*(uint16_t *)(data +
1478 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1479 
1480 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1481 		return true;
1482 	return false;
1483 }
1484 
1485 /**
1486  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1487  * @data: Pointer to network data buffer
1488  *
1489  * This api is for ipv4 packet.
1490  *
1491  * Return: ARP packet source IP value.
1492  */
1493 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1494 {
1495 	uint32_t src_ip;
1496 
1497 	src_ip = (uint32_t)(*(uint32_t *)(data +
1498 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1499 
1500 	return src_ip;
1501 }
1502 
1503 /**
1504  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1505  * @data: Pointer to network data buffer
1506  *
1507  * This api is for ipv4 packet.
1508  *
1509  * Return: ARP packet target IP value.
1510  */
1511 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1512 {
1513 	uint32_t tgt_ip;
1514 
1515 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1516 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1517 
1518 	return tgt_ip;
1519 }
1520 
1521 /**
1522  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1523  * @data: Pointer to network data buffer
1524  * @len: length to copy
1525  *
1526  * This api is for dns domain name
1527  *
1528  * Return: dns domain name.
1529  */
1530 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1531 {
1532 	uint8_t *domain_name;
1533 
1534 	domain_name = (uint8_t *)
1535 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1536 	return domain_name;
1537 }
1538 
1539 
1540 /**
1541  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1542  * @data: Pointer to network data buffer
1543  *
1544  * This api is for dns query packet.
1545  *
1546  * Return: true if packet is dns query packet.
1547  *	   false otherwise.
1548  */
1549 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1550 {
1551 	uint16_t op_code;
1552 	uint16_t tgt_port;
1553 
1554 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1555 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1556 	/* Standard DNS query always happen on Dest Port 53. */
1557 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1558 		op_code = (uint16_t)(*(uint16_t *)(data +
1559 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1560 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1561 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1562 			return true;
1563 	}
1564 	return false;
1565 }
1566 
1567 /**
1568  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1569  * @data: Pointer to network data buffer
1570  *
1571  * This api is for dns query response.
1572  *
1573  * Return: true if packet is dns response packet.
1574  *	   false otherwise.
1575  */
1576 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1577 {
1578 	uint16_t op_code;
1579 	uint16_t src_port;
1580 
1581 	src_port = (uint16_t)(*(uint16_t *)(data +
1582 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1583 	/* Standard DNS response always comes on Src Port 53. */
1584 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1585 		op_code = (uint16_t)(*(uint16_t *)(data +
1586 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1587 
1588 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1589 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1590 			return true;
1591 	}
1592 	return false;
1593 }
1594 
1595 /**
1596  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1597  * @data: Pointer to network data buffer
1598  *
1599  * This api is for tcp syn packet.
1600  *
1601  * Return: true if packet is tcp syn packet.
1602  *	   false otherwise.
1603  */
1604 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1605 {
1606 	uint8_t op_code;
1607 
1608 	op_code = (uint8_t)(*(uint8_t *)(data +
1609 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1610 
1611 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1612 		return true;
1613 	return false;
1614 }
1615 
1616 /**
1617  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1618  * @data: Pointer to network data buffer
1619  *
1620  * This api is for tcp syn ack packet.
1621  *
1622  * Return: true if packet is tcp syn ack packet.
1623  *	   false otherwise.
1624  */
1625 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1626 {
1627 	uint8_t op_code;
1628 
1629 	op_code = (uint8_t)(*(uint8_t *)(data +
1630 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1631 
1632 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1633 		return true;
1634 	return false;
1635 }
1636 
1637 /**
1638  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1639  * @data: Pointer to network data buffer
1640  *
1641  * This api is for tcp ack packet.
1642  *
1643  * Return: true if packet is tcp ack packet.
1644  *	   false otherwise.
1645  */
1646 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1647 {
1648 	uint8_t op_code;
1649 
1650 	op_code = (uint8_t)(*(uint8_t *)(data +
1651 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1652 
1653 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1654 		return true;
1655 	return false;
1656 }
1657 
1658 /**
1659  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1660  * @data: Pointer to network data buffer
1661  *
1662  * This api is for tcp packet.
1663  *
1664  * Return: tcp source port value.
1665  */
1666 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1667 {
1668 	uint16_t src_port;
1669 
1670 	src_port = (uint16_t)(*(uint16_t *)(data +
1671 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1672 
1673 	return src_port;
1674 }
1675 
1676 /**
1677  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1678  * @data: Pointer to network data buffer
1679  *
1680  * This api is for tcp packet.
1681  *
1682  * Return: tcp destination port value.
1683  */
1684 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1685 {
1686 	uint16_t tgt_port;
1687 
1688 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1689 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1690 
1691 	return tgt_port;
1692 }
1693 
1694 /**
1695  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1696  * @data: Pointer to network data buffer
1697  *
1698  * This api is for ipv4 req packet.
1699  *
1700  * Return: true if packet is icmpv4 request
1701  *	   false otherwise.
1702  */
1703 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1704 {
1705 	uint8_t op_code;
1706 
1707 	op_code = (uint8_t)(*(uint8_t *)(data +
1708 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1709 
1710 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1711 		return true;
1712 	return false;
1713 }
1714 
1715 /**
1716  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1717  * @data: Pointer to network data buffer
1718  *
1719  * This api is for ipv4 res packet.
1720  *
1721  * Return: true if packet is icmpv4 response
1722  *	   false otherwise.
1723  */
1724 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1725 {
1726 	uint8_t op_code;
1727 
1728 	op_code = (uint8_t)(*(uint8_t *)(data +
1729 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1730 
1731 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1732 		return true;
1733 	return false;
1734 }
1735 
1736 /**
1737  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1738  * @data: Pointer to network data buffer
1739  *
1740  * This api is for ipv4 packet.
1741  *
1742  * Return: icmpv4 packet source IP value.
1743  */
1744 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1745 {
1746 	uint32_t src_ip;
1747 
1748 	src_ip = (uint32_t)(*(uint32_t *)(data +
1749 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1750 
1751 	return src_ip;
1752 }
1753 
1754 /**
1755  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1756  * @data: Pointer to network data buffer
1757  *
1758  * This api is for ipv4 packet.
1759  *
1760  * Return: icmpv4 packet target IP value.
1761  */
1762 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1763 {
1764 	uint32_t tgt_ip;
1765 
1766 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1767 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1768 
1769 	return tgt_ip;
1770 }
1771 
1772 
1773 /**
1774  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1775  * @data: Pointer to IPV6 packet data buffer
1776  *
1777  * This func. checks whether it is a IPV6 packet or not.
1778  *
1779  * Return: TRUE if it is a IPV6 packet
1780  *         FALSE if not
1781  */
1782 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1783 {
1784 	uint16_t ether_type;
1785 
1786 	ether_type = (uint16_t)(*(uint16_t *)(data +
1787 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1788 
1789 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1790 		return true;
1791 	else
1792 		return false;
1793 }
1794 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1795 
1796 /**
1797  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1798  * @data: Pointer to network data buffer
1799  *
1800  * This api is for ipv6 packet.
1801  *
1802  * Return: true if packet is DHCP packet
1803  *	   false otherwise
1804  */
1805 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1806 {
1807 	uint16_t sport;
1808 	uint16_t dport;
1809 
1810 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1811 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1812 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1813 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1814 					sizeof(uint16_t));
1815 
1816 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1817 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1818 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1819 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1820 		return true;
1821 	else
1822 		return false;
1823 }
1824 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1825 
1826 /**
1827  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
1828  * @data: Pointer to network data buffer
1829  *
1830  * This api is for ipv6 packet.
1831  *
1832  * Return: true if packet is MDNS packet
1833  *	   false otherwise
1834  */
1835 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
1836 {
1837 	uint16_t sport;
1838 	uint16_t dport;
1839 
1840 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1841 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1842 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1843 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1844 					sizeof(uint16_t));
1845 
1846 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
1847 	    dport == sport)
1848 		return true;
1849 	else
1850 		return false;
1851 }
1852 
1853 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
1854 
1855 /**
1856  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1857  * @data: Pointer to IPV4 packet data buffer
1858  *
1859  * This func. checks whether it is a IPV4 multicast packet or not.
1860  *
1861  * Return: TRUE if it is a IPV4 multicast packet
1862  *         FALSE if not
1863  */
1864 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1865 {
1866 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1867 		uint32_t *dst_addr =
1868 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1869 
1870 		/*
1871 		 * Check first word of the IPV4 address and if it is
1872 		 * equal to 0xE then it represents multicast IP.
1873 		 */
1874 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1875 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1876 			return true;
1877 		else
1878 			return false;
1879 	} else
1880 		return false;
1881 }
1882 
1883 /**
1884  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1885  * @data: Pointer to IPV6 packet data buffer
1886  *
1887  * This func. checks whether it is a IPV6 multicast packet or not.
1888  *
1889  * Return: TRUE if it is a IPV6 multicast packet
1890  *         FALSE if not
1891  */
1892 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1893 {
1894 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1895 		uint16_t *dst_addr;
1896 
1897 		dst_addr = (uint16_t *)
1898 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1899 
1900 		/*
1901 		 * Check first byte of the IP address and if it
1902 		 * 0xFF00 then it is a IPV6 mcast packet.
1903 		 */
1904 		if (*dst_addr ==
1905 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1906 			return true;
1907 		else
1908 			return false;
1909 	} else
1910 		return false;
1911 }
1912 
1913 /**
1914  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1915  * @data: Pointer to IPV4 ICMP packet data buffer
1916  *
1917  * This func. checks whether it is a ICMP packet or not.
1918  *
1919  * Return: TRUE if it is a ICMP packet
1920  *         FALSE if not
1921  */
1922 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1923 {
1924 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1925 		uint8_t pkt_type;
1926 
1927 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1928 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1929 
1930 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1931 			return true;
1932 		else
1933 			return false;
1934 	} else
1935 		return false;
1936 }
1937 
1938 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
1939 
1940 /**
1941  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1942  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1943  *
1944  * This func. checks whether it is a ICMPV6 packet or not.
1945  *
1946  * Return: TRUE if it is a ICMPV6 packet
1947  *         FALSE if not
1948  */
1949 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1950 {
1951 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1952 		uint8_t pkt_type;
1953 
1954 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1955 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1956 
1957 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1958 			return true;
1959 		else
1960 			return false;
1961 	} else
1962 		return false;
1963 }
1964 
1965 /**
1966  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1967  * @data: Pointer to IPV4 UDP packet data buffer
1968  *
1969  * This func. checks whether it is a IPV4 UDP packet or not.
1970  *
1971  * Return: TRUE if it is a IPV4 UDP packet
1972  *         FALSE if not
1973  */
1974 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1975 {
1976 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1977 		uint8_t pkt_type;
1978 
1979 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1980 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1981 
1982 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1983 			return true;
1984 		else
1985 			return false;
1986 	} else
1987 		return false;
1988 }
1989 
1990 /**
1991  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1992  * @data: Pointer to IPV4 TCP packet data buffer
1993  *
1994  * This func. checks whether it is a IPV4 TCP packet or not.
1995  *
1996  * Return: TRUE if it is a IPV4 TCP packet
1997  *         FALSE if not
1998  */
1999 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2000 {
2001 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2002 		uint8_t pkt_type;
2003 
2004 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2005 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2006 
2007 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2008 			return true;
2009 		else
2010 			return false;
2011 	} else
2012 		return false;
2013 }
2014 
2015 /**
2016  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2017  * @data: Pointer to IPV6 UDP packet data buffer
2018  *
2019  * This func. checks whether it is a IPV6 UDP packet or not.
2020  *
2021  * Return: TRUE if it is a IPV6 UDP packet
2022  *         FALSE if not
2023  */
2024 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2025 {
2026 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2027 		uint8_t pkt_type;
2028 
2029 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2030 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2031 
2032 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2033 			return true;
2034 		else
2035 			return false;
2036 	} else
2037 		return false;
2038 }
2039 
2040 /**
2041  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2042  * @data: Pointer to IPV6 TCP packet data buffer
2043  *
2044  * This func. checks whether it is a IPV6 TCP packet or not.
2045  *
2046  * Return: TRUE if it is a IPV6 TCP packet
2047  *         FALSE if not
2048  */
2049 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2050 {
2051 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2052 		uint8_t pkt_type;
2053 
2054 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2055 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2056 
2057 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2058 			return true;
2059 		else
2060 			return false;
2061 	} else
2062 		return false;
2063 }
2064 
2065 /**
2066  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2067  * @nbuf - sk buff
2068  *
2069  * Return: true if packet is broadcast
2070  *	   false otherwise
2071  */
2072 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2073 {
2074 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2075 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2076 }
2077 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2078 
2079 #ifdef NBUF_MEMORY_DEBUG
2080 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2081 
2082 /**
2083  * struct qdf_nbuf_track_t - Network buffer track structure
2084  *
2085  * @p_next: Pointer to next
2086  * @net_buf: Pointer to network buffer
2087  * @func_name: Function name
2088  * @line_num: Line number
2089  * @size: Size
2090  * @map_func_name: nbuf mapping function name
2091  * @map_line_num: mapping function line number
2092  * @unmap_func_name: nbuf unmapping function name
2093  * @unmap_line_num: mapping function line number
2094  * @is_nbuf_mapped: indicate mapped/unmapped nbuf
2095  */
2096 struct qdf_nbuf_track_t {
2097 	struct qdf_nbuf_track_t *p_next;
2098 	qdf_nbuf_t net_buf;
2099 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2100 	uint32_t line_num;
2101 	size_t size;
2102 	char map_func_name[QDF_MEM_FUNC_NAME_SIZE];
2103 	uint32_t map_line_num;
2104 	char unmap_func_name[QDF_MEM_FUNC_NAME_SIZE];
2105 	uint32_t unmap_line_num;
2106 	bool is_nbuf_mapped;
2107 };
2108 
2109 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2110 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2111 
2112 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2113 static struct kmem_cache *nbuf_tracking_cache;
2114 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2115 static spinlock_t qdf_net_buf_track_free_list_lock;
2116 static uint32_t qdf_net_buf_track_free_list_count;
2117 static uint32_t qdf_net_buf_track_used_list_count;
2118 static uint32_t qdf_net_buf_track_max_used;
2119 static uint32_t qdf_net_buf_track_max_free;
2120 static uint32_t qdf_net_buf_track_max_allocated;
2121 
2122 /**
2123  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2124  *
2125  * tracks the max number of network buffers that the wlan driver was tracking
2126  * at any one time.
2127  *
2128  * Return: none
2129  */
2130 static inline void update_max_used(void)
2131 {
2132 	int sum;
2133 
2134 	if (qdf_net_buf_track_max_used <
2135 	    qdf_net_buf_track_used_list_count)
2136 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2137 	sum = qdf_net_buf_track_free_list_count +
2138 		qdf_net_buf_track_used_list_count;
2139 	if (qdf_net_buf_track_max_allocated < sum)
2140 		qdf_net_buf_track_max_allocated = sum;
2141 }
2142 
2143 /**
2144  * update_max_free() - update qdf_net_buf_track_free_list_count
2145  *
2146  * tracks the max number tracking buffers kept in the freelist.
2147  *
2148  * Return: none
2149  */
2150 static inline void update_max_free(void)
2151 {
2152 	if (qdf_net_buf_track_max_free <
2153 	    qdf_net_buf_track_free_list_count)
2154 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2155 }
2156 
2157 /**
2158  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2159  *
2160  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2161  * This function also ads fexibility to adjust the allocation and freelist
2162  * scheems.
2163  *
2164  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2165  */
2166 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2167 {
2168 	int flags = GFP_KERNEL;
2169 	unsigned long irq_flag;
2170 	QDF_NBUF_TRACK *new_node = NULL;
2171 
2172 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2173 	qdf_net_buf_track_used_list_count++;
2174 	if (qdf_net_buf_track_free_list) {
2175 		new_node = qdf_net_buf_track_free_list;
2176 		qdf_net_buf_track_free_list =
2177 			qdf_net_buf_track_free_list->p_next;
2178 		qdf_net_buf_track_free_list_count--;
2179 	}
2180 	update_max_used();
2181 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2182 
2183 	if (new_node)
2184 		return new_node;
2185 
2186 	if (in_interrupt() || irqs_disabled() || in_atomic())
2187 		flags = GFP_ATOMIC;
2188 
2189 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2190 }
2191 
2192 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2193 #define FREEQ_POOLSIZE 2048
2194 
2195 /**
2196  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2197  *
2198  * Matches calls to qdf_nbuf_track_alloc.
2199  * Either frees the tracking cookie to kernel or an internal
2200  * freelist based on the size of the freelist.
2201  *
2202  * Return: none
2203  */
2204 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2205 {
2206 	unsigned long irq_flag;
2207 
2208 	if (!node)
2209 		return;
2210 
2211 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2212 	 * only shrink the freelist if it is bigger than twice the number of
2213 	 * nbufs in use. If the driver is stalling in a consistent bursty
2214 	 * fasion, this will keep 3/4 of thee allocations from the free list
2215 	 * while also allowing the system to recover memory as less frantic
2216 	 * traffic occurs.
2217 	 */
2218 
2219 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2220 
2221 	qdf_net_buf_track_used_list_count--;
2222 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2223 	   (qdf_net_buf_track_free_list_count >
2224 	    qdf_net_buf_track_used_list_count << 1)) {
2225 		kmem_cache_free(nbuf_tracking_cache, node);
2226 	} else {
2227 		node->p_next = qdf_net_buf_track_free_list;
2228 		qdf_net_buf_track_free_list = node;
2229 		qdf_net_buf_track_free_list_count++;
2230 	}
2231 	update_max_free();
2232 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2233 }
2234 
2235 /**
2236  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2237  *
2238  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2239  * the freelist first makes it performant for the first iperf udp burst
2240  * as well as steady state.
2241  *
2242  * Return: None
2243  */
2244 static void qdf_nbuf_track_prefill(void)
2245 {
2246 	int i;
2247 	QDF_NBUF_TRACK *node, *head;
2248 
2249 	/* prepopulate the freelist */
2250 	head = NULL;
2251 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2252 		node = qdf_nbuf_track_alloc();
2253 		if (!node)
2254 			continue;
2255 		node->p_next = head;
2256 		head = node;
2257 	}
2258 	while (head) {
2259 		node = head->p_next;
2260 		qdf_nbuf_track_free(head);
2261 		head = node;
2262 	}
2263 
2264 	/* prefilled buffers should not count as used */
2265 	qdf_net_buf_track_max_used = 0;
2266 }
2267 
2268 /**
2269  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2270  *
2271  * This initializes the memory manager for the nbuf tracking cookies.  Because
2272  * these cookies are all the same size and only used in this feature, we can
2273  * use a kmem_cache to provide tracking as well as to speed up allocations.
2274  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2275  * features) a freelist is prepopulated here.
2276  *
2277  * Return: None
2278  */
2279 static void qdf_nbuf_track_memory_manager_create(void)
2280 {
2281 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2282 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2283 						sizeof(QDF_NBUF_TRACK),
2284 						0, 0, NULL);
2285 
2286 	qdf_nbuf_track_prefill();
2287 }
2288 
2289 /**
2290  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2291  *
2292  * Empty the freelist and print out usage statistics when it is no longer
2293  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2294  * any nbuf tracking cookies were leaked.
2295  *
2296  * Return: None
2297  */
2298 static void qdf_nbuf_track_memory_manager_destroy(void)
2299 {
2300 	QDF_NBUF_TRACK *node, *tmp;
2301 	unsigned long irq_flag;
2302 
2303 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2304 	node = qdf_net_buf_track_free_list;
2305 
2306 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2307 		qdf_print("%s: unexpectedly large max_used count %d",
2308 			  __func__, qdf_net_buf_track_max_used);
2309 
2310 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2311 		qdf_print("%s: %d unused trackers were allocated",
2312 			  __func__,
2313 			  qdf_net_buf_track_max_allocated -
2314 			  qdf_net_buf_track_max_used);
2315 
2316 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2317 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2318 		qdf_print("%s: check freelist shrinking functionality",
2319 			  __func__);
2320 
2321 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2322 		  "%s: %d residual freelist size",
2323 		  __func__, qdf_net_buf_track_free_list_count);
2324 
2325 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2326 		  "%s: %d max freelist size observed",
2327 		  __func__, qdf_net_buf_track_max_free);
2328 
2329 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2330 		  "%s: %d max buffers used observed",
2331 		  __func__, qdf_net_buf_track_max_used);
2332 
2333 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2334 		  "%s: %d max buffers allocated observed",
2335 		  __func__, qdf_net_buf_track_max_allocated);
2336 
2337 	while (node) {
2338 		tmp = node;
2339 		node = node->p_next;
2340 		kmem_cache_free(nbuf_tracking_cache, tmp);
2341 		qdf_net_buf_track_free_list_count--;
2342 	}
2343 
2344 	if (qdf_net_buf_track_free_list_count != 0)
2345 		qdf_info("%d unfreed tracking memory lost in freelist",
2346 			 qdf_net_buf_track_free_list_count);
2347 
2348 	if (qdf_net_buf_track_used_list_count != 0)
2349 		qdf_info("%d unfreed tracking memory still in use",
2350 			 qdf_net_buf_track_used_list_count);
2351 
2352 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2353 	kmem_cache_destroy(nbuf_tracking_cache);
2354 	qdf_net_buf_track_free_list = NULL;
2355 }
2356 
2357 /**
2358  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2359  *
2360  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2361  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2362  * WLAN driver module whose allocated SKB is freed by network stack are
2363  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2364  * reported as memory leak.
2365  *
2366  * Return: none
2367  */
2368 void qdf_net_buf_debug_init(void)
2369 {
2370 	uint32_t i;
2371 
2372 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2373 
2374 	if (is_initial_mem_debug_disabled)
2375 		return;
2376 
2377 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2378 
2379 	qdf_nbuf_map_tracking_init();
2380 	qdf_nbuf_track_memory_manager_create();
2381 
2382 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2383 		gp_qdf_net_buf_track_tbl[i] = NULL;
2384 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2385 	}
2386 }
2387 qdf_export_symbol(qdf_net_buf_debug_init);
2388 
2389 /**
2390  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2391  *
2392  * Exit network buffer tracking debug functionality and log SKB memory leaks
2393  * As part of exiting the functionality, free the leaked memory and
2394  * cleanup the tracking buffers.
2395  *
2396  * Return: none
2397  */
2398 void qdf_net_buf_debug_exit(void)
2399 {
2400 	uint32_t i;
2401 	uint32_t count = 0;
2402 	unsigned long irq_flag;
2403 	QDF_NBUF_TRACK *p_node;
2404 	QDF_NBUF_TRACK *p_prev;
2405 
2406 	if (is_initial_mem_debug_disabled)
2407 		return;
2408 
2409 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2410 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2411 		p_node = gp_qdf_net_buf_track_tbl[i];
2412 		while (p_node) {
2413 			p_prev = p_node;
2414 			p_node = p_node->p_next;
2415 			count++;
2416 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2417 				 p_prev->func_name, p_prev->line_num,
2418 				 p_prev->size, p_prev->net_buf);
2419 			qdf_info(
2420 				 "SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2421 				 p_prev->map_func_name,
2422 				 p_prev->map_line_num,
2423 				 p_prev->unmap_func_name,
2424 				 p_prev->unmap_line_num,
2425 				 p_prev->is_nbuf_mapped);
2426 			qdf_nbuf_track_free(p_prev);
2427 		}
2428 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2429 	}
2430 
2431 	qdf_nbuf_track_memory_manager_destroy();
2432 	qdf_nbuf_map_tracking_deinit();
2433 
2434 #ifdef CONFIG_HALT_KMEMLEAK
2435 	if (count) {
2436 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2437 		QDF_BUG(0);
2438 	}
2439 #endif
2440 }
2441 qdf_export_symbol(qdf_net_buf_debug_exit);
2442 
2443 /**
2444  * qdf_net_buf_debug_hash() - hash network buffer pointer
2445  *
2446  * Return: hash value
2447  */
2448 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2449 {
2450 	uint32_t i;
2451 
2452 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2453 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2454 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2455 
2456 	return i;
2457 }
2458 
2459 /**
2460  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2461  *
2462  * Return: If skb is found in hash table then return pointer to network buffer
2463  *	else return %NULL
2464  */
2465 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2466 {
2467 	uint32_t i;
2468 	QDF_NBUF_TRACK *p_node;
2469 
2470 	i = qdf_net_buf_debug_hash(net_buf);
2471 	p_node = gp_qdf_net_buf_track_tbl[i];
2472 
2473 	while (p_node) {
2474 		if (p_node->net_buf == net_buf)
2475 			return p_node;
2476 		p_node = p_node->p_next;
2477 	}
2478 
2479 	return NULL;
2480 }
2481 
2482 /**
2483  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2484  *
2485  * Return: none
2486  */
2487 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2488 				const char *func_name, uint32_t line_num)
2489 {
2490 	uint32_t i;
2491 	unsigned long irq_flag;
2492 	QDF_NBUF_TRACK *p_node;
2493 	QDF_NBUF_TRACK *new_node;
2494 
2495 	if (is_initial_mem_debug_disabled)
2496 		return;
2497 
2498 	new_node = qdf_nbuf_track_alloc();
2499 
2500 	i = qdf_net_buf_debug_hash(net_buf);
2501 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2502 
2503 	p_node = qdf_net_buf_debug_look_up(net_buf);
2504 
2505 	if (p_node) {
2506 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2507 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2508 			  net_buf, func_name, line_num);
2509 		qdf_nbuf_track_free(new_node);
2510 	} else {
2511 		p_node = new_node;
2512 		if (p_node) {
2513 			p_node->net_buf = net_buf;
2514 			qdf_str_lcopy(p_node->func_name, func_name,
2515 				      QDF_MEM_FUNC_NAME_SIZE);
2516 			p_node->line_num = line_num;
2517 			p_node->size = size;
2518 			qdf_mem_skb_inc(size);
2519 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2520 			gp_qdf_net_buf_track_tbl[i] = p_node;
2521 		} else
2522 			qdf_print(
2523 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2524 				  func_name, line_num, size);
2525 	}
2526 
2527 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2528 }
2529 qdf_export_symbol(qdf_net_buf_debug_add_node);
2530 
2531 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2532 				   uint32_t line_num)
2533 {
2534 	uint32_t i;
2535 	unsigned long irq_flag;
2536 	QDF_NBUF_TRACK *p_node;
2537 
2538 	if (is_initial_mem_debug_disabled)
2539 		return;
2540 
2541 	i = qdf_net_buf_debug_hash(net_buf);
2542 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2543 
2544 	p_node = qdf_net_buf_debug_look_up(net_buf);
2545 
2546 	if (p_node) {
2547 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2548 			      QDF_MEM_FUNC_NAME_SIZE);
2549 		p_node->line_num = line_num;
2550 	}
2551 
2552 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2553 }
2554 
2555 qdf_export_symbol(qdf_net_buf_debug_update_node);
2556 
2557 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2558 				       const char *func_name,
2559 				       uint32_t line_num)
2560 {
2561 	uint32_t i;
2562 	unsigned long irq_flag;
2563 	QDF_NBUF_TRACK *p_node;
2564 
2565 	if (is_initial_mem_debug_disabled)
2566 		return;
2567 
2568 	i = qdf_net_buf_debug_hash(net_buf);
2569 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2570 
2571 	p_node = qdf_net_buf_debug_look_up(net_buf);
2572 
2573 	if (p_node) {
2574 		qdf_str_lcopy(p_node->map_func_name, func_name,
2575 			      QDF_MEM_FUNC_NAME_SIZE);
2576 		p_node->map_line_num = line_num;
2577 		p_node->is_nbuf_mapped = true;
2578 	}
2579 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2580 }
2581 
2582 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
2583 					 const char *func_name,
2584 					 uint32_t line_num)
2585 {
2586 	uint32_t i;
2587 	unsigned long irq_flag;
2588 	QDF_NBUF_TRACK *p_node;
2589 
2590 	if (is_initial_mem_debug_disabled)
2591 		return;
2592 
2593 	i = qdf_net_buf_debug_hash(net_buf);
2594 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2595 
2596 	p_node = qdf_net_buf_debug_look_up(net_buf);
2597 
2598 	if (p_node) {
2599 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
2600 			      QDF_MEM_FUNC_NAME_SIZE);
2601 		p_node->unmap_line_num = line_num;
2602 		p_node->is_nbuf_mapped = false;
2603 	}
2604 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2605 }
2606 
2607 /**
2608  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2609  *
2610  * Return: none
2611  */
2612 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2613 {
2614 	uint32_t i;
2615 	QDF_NBUF_TRACK *p_head;
2616 	QDF_NBUF_TRACK *p_node = NULL;
2617 	unsigned long irq_flag;
2618 	QDF_NBUF_TRACK *p_prev;
2619 
2620 	if (is_initial_mem_debug_disabled)
2621 		return;
2622 
2623 	i = qdf_net_buf_debug_hash(net_buf);
2624 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2625 
2626 	p_head = gp_qdf_net_buf_track_tbl[i];
2627 
2628 	/* Unallocated SKB */
2629 	if (!p_head)
2630 		goto done;
2631 
2632 	p_node = p_head;
2633 	/* Found at head of the table */
2634 	if (p_head->net_buf == net_buf) {
2635 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2636 		goto done;
2637 	}
2638 
2639 	/* Search in collision list */
2640 	while (p_node) {
2641 		p_prev = p_node;
2642 		p_node = p_node->p_next;
2643 		if ((p_node) && (p_node->net_buf == net_buf)) {
2644 			p_prev->p_next = p_node->p_next;
2645 			break;
2646 		}
2647 	}
2648 
2649 done:
2650 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2651 
2652 	if (p_node) {
2653 		qdf_mem_skb_dec(p_node->size);
2654 		qdf_nbuf_track_free(p_node);
2655 	} else {
2656 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2657 			  net_buf);
2658 		QDF_BUG(0);
2659 	}
2660 }
2661 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2662 
2663 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2664 				   const char *func_name, uint32_t line_num)
2665 {
2666 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2667 
2668 	if (is_initial_mem_debug_disabled)
2669 		return;
2670 
2671 	while (ext_list) {
2672 		/*
2673 		 * Take care to add if it is Jumbo packet connected using
2674 		 * frag_list
2675 		 */
2676 		qdf_nbuf_t next;
2677 
2678 		next = qdf_nbuf_queue_next(ext_list);
2679 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2680 		ext_list = next;
2681 	}
2682 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2683 }
2684 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2685 
2686 /**
2687  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2688  * @net_buf: Network buf holding head segment (single)
2689  *
2690  * WLAN driver module whose allocated SKB is freed by network stack are
2691  * suppose to call this API before returning SKB to network stack such
2692  * that the SKB is not reported as memory leak.
2693  *
2694  * Return: none
2695  */
2696 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2697 {
2698 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2699 
2700 	if (is_initial_mem_debug_disabled)
2701 		return;
2702 
2703 	while (ext_list) {
2704 		/*
2705 		 * Take care to free if it is Jumbo packet connected using
2706 		 * frag_list
2707 		 */
2708 		qdf_nbuf_t next;
2709 
2710 		next = qdf_nbuf_queue_next(ext_list);
2711 
2712 		if (qdf_nbuf_get_users(ext_list) > 1) {
2713 			ext_list = next;
2714 			continue;
2715 		}
2716 
2717 		qdf_net_buf_debug_delete_node(ext_list);
2718 		ext_list = next;
2719 	}
2720 
2721 	if (qdf_nbuf_get_users(net_buf) > 1)
2722 		return;
2723 
2724 	qdf_net_buf_debug_delete_node(net_buf);
2725 }
2726 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2727 
2728 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2729 				int reserve, int align, int prio,
2730 				const char *func, uint32_t line)
2731 {
2732 	qdf_nbuf_t nbuf;
2733 
2734 	if (is_initial_mem_debug_disabled)
2735 		return __qdf_nbuf_alloc(osdev, size,
2736 					reserve, align,
2737 					prio, func, line);
2738 
2739 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2740 
2741 	/* Store SKB in internal QDF tracking table */
2742 	if (qdf_likely(nbuf)) {
2743 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2744 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2745 	} else {
2746 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2747 	}
2748 
2749 	return nbuf;
2750 }
2751 qdf_export_symbol(qdf_nbuf_alloc_debug);
2752 
2753 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2754 {
2755 	qdf_nbuf_t ext_list;
2756 
2757 	if (qdf_unlikely(!nbuf))
2758 		return;
2759 
2760 	if (is_initial_mem_debug_disabled)
2761 		goto free_buf;
2762 
2763 	if (qdf_nbuf_get_users(nbuf) > 1)
2764 		goto free_buf;
2765 
2766 	/* Remove SKB from internal QDF tracking table */
2767 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2768 	qdf_net_buf_debug_delete_node(nbuf);
2769 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2770 
2771 	/* Take care to delete the debug entries for frag_list */
2772 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2773 	while (ext_list) {
2774 		if (qdf_nbuf_get_users(ext_list) == 1) {
2775 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2776 			qdf_net_buf_debug_delete_node(ext_list);
2777 		}
2778 
2779 		ext_list = qdf_nbuf_queue_next(ext_list);
2780 	}
2781 
2782 free_buf:
2783 	__qdf_nbuf_free(nbuf);
2784 }
2785 qdf_export_symbol(qdf_nbuf_free_debug);
2786 
2787 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2788 {
2789 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2790 
2791 	if (is_initial_mem_debug_disabled)
2792 		return cloned_buf;
2793 
2794 	if (qdf_unlikely(!cloned_buf))
2795 		return NULL;
2796 
2797 	/* Store SKB in internal QDF tracking table */
2798 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2799 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2800 
2801 	return cloned_buf;
2802 }
2803 qdf_export_symbol(qdf_nbuf_clone_debug);
2804 
2805 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2806 {
2807 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2808 
2809 	if (is_initial_mem_debug_disabled)
2810 		return copied_buf;
2811 
2812 	if (qdf_unlikely(!copied_buf))
2813 		return NULL;
2814 
2815 	/* Store SKB in internal QDF tracking table */
2816 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2817 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2818 
2819 	return copied_buf;
2820 }
2821 qdf_export_symbol(qdf_nbuf_copy_debug);
2822 
2823 qdf_nbuf_t
2824 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
2825 			   const char *func, uint32_t line)
2826 {
2827 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
2828 
2829 	if (qdf_unlikely(!copied_buf))
2830 		return NULL;
2831 
2832 	if (is_initial_mem_debug_disabled)
2833 		return copied_buf;
2834 
2835 	/* Store SKB in internal QDF tracking table */
2836 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2837 	qdf_nbuf_history_add(copied_buf, func, line,
2838 			     QDF_NBUF_ALLOC_COPY_EXPAND);
2839 
2840 	return copied_buf;
2841 }
2842 
2843 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
2844 
2845 #endif /* NBUF_MEMORY_DEBUG */
2846 
2847 #if defined(FEATURE_TSO)
2848 
2849 /**
2850  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2851  *
2852  * @ethproto: ethernet type of the msdu
2853  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2854  * @l2_len: L2 length for the msdu
2855  * @eit_hdr: pointer to EIT header
2856  * @eit_hdr_len: EIT header length for the msdu
2857  * @eit_hdr_dma_map_addr: dma addr for EIT header
2858  * @tcphdr: pointer to tcp header
2859  * @ipv4_csum_en: ipv4 checksum enable
2860  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2861  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2862  * @ip_id: IP id
2863  * @tcp_seq_num: TCP sequence number
2864  *
2865  * This structure holds the TSO common info that is common
2866  * across all the TCP segments of the jumbo packet.
2867  */
2868 struct qdf_tso_cmn_seg_info_t {
2869 	uint16_t ethproto;
2870 	uint16_t ip_tcp_hdr_len;
2871 	uint16_t l2_len;
2872 	uint8_t *eit_hdr;
2873 	uint32_t eit_hdr_len;
2874 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2875 	struct tcphdr *tcphdr;
2876 	uint16_t ipv4_csum_en;
2877 	uint16_t tcp_ipv4_csum_en;
2878 	uint16_t tcp_ipv6_csum_en;
2879 	uint16_t ip_id;
2880 	uint32_t tcp_seq_num;
2881 };
2882 
2883 /**
2884  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
2885  *
2886  * @skb: network buffer
2887  *
2888  * Return: byte offset length of 8 bytes aligned.
2889  */
2890 #ifdef WAR_TXDMA_LIMITATION
2891 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
2892 {
2893 	uint32_t eit_hdr_len;
2894 	uint8_t *eit_hdr;
2895 	uint8_t byte_8_align_offset;
2896 
2897 	/*
2898 	 * Workaround for TXDMA HW limitation.
2899 	 * ADDR0&0x1FFFFFFF8 should not equal ADDR1&0x1FFFFFFF8.
2900 	 * Otherwise, TXDMA will run into exception, which cause TX fail.
2901 	 * ADDR0: the address of last words in previous buffer;
2902 	 * ADDR1: the address of first words in next buffer;
2903 	 * To avoid this, shift several bytes for ADDR0.
2904 	 */
2905 	eit_hdr = skb->data;
2906 	eit_hdr_len = (skb_transport_header(skb)
2907 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2908 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
2909 	if (qdf_unlikely(byte_8_align_offset)) {
2910 		TSO_DEBUG("%pK,Len %d %d",
2911 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
2912 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
2913 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
2914 				  __LINE__, skb->head, skb->data,
2915 				 byte_8_align_offset);
2916 			return 0;
2917 		}
2918 		qdf_nbuf_push_head(skb, byte_8_align_offset);
2919 		qdf_mem_move(skb->data,
2920 			     skb->data + byte_8_align_offset,
2921 			     eit_hdr_len);
2922 		skb->len -= byte_8_align_offset;
2923 		skb->mac_header -= byte_8_align_offset;
2924 		skb->network_header -= byte_8_align_offset;
2925 		skb->transport_header -= byte_8_align_offset;
2926 	}
2927 	return byte_8_align_offset;
2928 }
2929 #else
2930 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
2931 {
2932 	return 0;
2933 }
2934 #endif
2935 
2936 /**
2937  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2938  * information
2939  * @osdev: qdf device handle
2940  * @skb: skb buffer
2941  * @tso_info: Parameters common to all segements
2942  *
2943  * Get the TSO information that is common across all the TCP
2944  * segments of the jumbo packet
2945  *
2946  * Return: 0 - success 1 - failure
2947  */
2948 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2949 			struct sk_buff *skb,
2950 			struct qdf_tso_cmn_seg_info_t *tso_info)
2951 {
2952 	/* Get ethernet type and ethernet header length */
2953 	tso_info->ethproto = vlan_get_protocol(skb);
2954 
2955 	/* Determine whether this is an IPv4 or IPv6 packet */
2956 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2957 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2958 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2959 
2960 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2961 		tso_info->ipv4_csum_en = 1;
2962 		tso_info->tcp_ipv4_csum_en = 1;
2963 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2964 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2965 				ipv4_hdr->protocol);
2966 			return 1;
2967 		}
2968 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2969 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2970 		tso_info->tcp_ipv6_csum_en = 1;
2971 	} else {
2972 		qdf_err("TSO: ethertype 0x%x is not supported!",
2973 			tso_info->ethproto);
2974 		return 1;
2975 	}
2976 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2977 	tso_info->tcphdr = tcp_hdr(skb);
2978 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2979 	/* get pointer to the ethernet + IP + TCP header and their length */
2980 	tso_info->eit_hdr = skb->data;
2981 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2982 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2983 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2984 							tso_info->eit_hdr,
2985 							tso_info->eit_hdr_len,
2986 							DMA_TO_DEVICE);
2987 	if (unlikely(dma_mapping_error(osdev->dev,
2988 				       tso_info->eit_hdr_dma_map_addr))) {
2989 		qdf_err("DMA mapping error!");
2990 		qdf_assert(0);
2991 		return 1;
2992 	}
2993 
2994 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2995 		/* inlcude IPv4 header length for IPV4 (total length) */
2996 		tso_info->ip_tcp_hdr_len =
2997 			tso_info->eit_hdr_len - tso_info->l2_len;
2998 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2999 		/* exclude IPv6 header length for IPv6 (payload length) */
3000 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3001 	}
3002 	/*
3003 	 * The length of the payload (application layer data) is added to
3004 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3005 	 * descriptor.
3006 	 */
3007 
3008 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3009 		tso_info->tcp_seq_num,
3010 		tso_info->eit_hdr_len,
3011 		tso_info->l2_len,
3012 		skb->len);
3013 	return 0;
3014 }
3015 
3016 
3017 /**
3018  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3019  *
3020  * @curr_seg: Segment whose contents are initialized
3021  * @tso_cmn_info: Parameters common to all segements
3022  *
3023  * Return: None
3024  */
3025 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3026 				struct qdf_tso_seg_elem_t *curr_seg,
3027 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3028 {
3029 	/* Initialize the flags to 0 */
3030 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3031 
3032 	/*
3033 	 * The following fields remain the same across all segments of
3034 	 * a jumbo packet
3035 	 */
3036 	curr_seg->seg.tso_flags.tso_enable = 1;
3037 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3038 		tso_cmn_info->ipv4_csum_en;
3039 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3040 		tso_cmn_info->tcp_ipv6_csum_en;
3041 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3042 		tso_cmn_info->tcp_ipv4_csum_en;
3043 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3044 
3045 	/* The following fields change for the segments */
3046 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3047 	tso_cmn_info->ip_id++;
3048 
3049 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3050 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3051 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3052 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3053 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3054 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3055 
3056 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3057 
3058 	/*
3059 	 * First fragment for each segment always contains the ethernet,
3060 	 * IP and TCP header
3061 	 */
3062 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3063 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3064 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3065 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3066 
3067 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3068 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3069 		   tso_cmn_info->eit_hdr_len,
3070 		   curr_seg->seg.tso_flags.tcp_seq_num,
3071 		   curr_seg->seg.total_len);
3072 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3073 }
3074 
3075 /**
3076  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3077  * into segments
3078  * @nbuf: network buffer to be segmented
3079  * @tso_info: This is the output. The information about the
3080  *           TSO segments will be populated within this.
3081  *
3082  * This function fragments a TCP jumbo packet into smaller
3083  * segments to be transmitted by the driver. It chains the TSO
3084  * segments created into a list.
3085  *
3086  * Return: number of TSO segments
3087  */
3088 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3089 		struct qdf_tso_info_t *tso_info)
3090 {
3091 	/* common across all segments */
3092 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3093 	/* segment specific */
3094 	void *tso_frag_vaddr;
3095 	qdf_dma_addr_t tso_frag_paddr = 0;
3096 	uint32_t num_seg = 0;
3097 	struct qdf_tso_seg_elem_t *curr_seg;
3098 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3099 	skb_frag_t *frag = NULL;
3100 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3101 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3102 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3103 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3104 	int j = 0; /* skb fragment index */
3105 	uint8_t byte_8_align_offset;
3106 
3107 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3108 	total_num_seg = tso_info->tso_num_seg_list;
3109 	curr_seg = tso_info->tso_seg_list;
3110 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3111 
3112 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3113 
3114 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3115 						skb, &tso_cmn_info))) {
3116 		qdf_warn("TSO: error getting common segment info");
3117 		return 0;
3118 	}
3119 
3120 	/* length of the first chunk of data in the skb */
3121 	skb_frag_len = skb_headlen(skb);
3122 
3123 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3124 	/* update the remaining skb fragment length and TSO segment length */
3125 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3126 	skb_proc -= tso_cmn_info.eit_hdr_len;
3127 
3128 	/* get the address to the next tso fragment */
3129 	tso_frag_vaddr = skb->data +
3130 			 tso_cmn_info.eit_hdr_len +
3131 			 byte_8_align_offset;
3132 	/* get the length of the next tso fragment */
3133 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3134 
3135 	if (tso_frag_len != 0) {
3136 		tso_frag_paddr = dma_map_single(osdev->dev,
3137 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
3138 	}
3139 
3140 	if (unlikely(dma_mapping_error(osdev->dev,
3141 					tso_frag_paddr))) {
3142 		qdf_err("DMA mapping error!");
3143 		qdf_assert(0);
3144 		return 0;
3145 	}
3146 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3147 		__LINE__, skb_frag_len, tso_frag_len);
3148 	num_seg = tso_info->num_segs;
3149 	tso_info->num_segs = 0;
3150 	tso_info->is_tso = 1;
3151 
3152 	while (num_seg && curr_seg) {
3153 		int i = 1; /* tso fragment index */
3154 		uint8_t more_tso_frags = 1;
3155 
3156 		curr_seg->seg.num_frags = 0;
3157 		tso_info->num_segs++;
3158 		total_num_seg->num_seg.tso_cmn_num_seg++;
3159 
3160 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3161 						 &tso_cmn_info);
3162 
3163 		/* If TCP PSH flag is set, set it in the last or only segment */
3164 		if (num_seg == 1)
3165 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3166 
3167 		if (unlikely(skb_proc == 0))
3168 			return tso_info->num_segs;
3169 
3170 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3171 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3172 		/* frag len is added to ip_len in while loop below*/
3173 
3174 		curr_seg->seg.num_frags++;
3175 
3176 		while (more_tso_frags) {
3177 			if (tso_frag_len != 0) {
3178 				curr_seg->seg.tso_frags[i].vaddr =
3179 					tso_frag_vaddr;
3180 				curr_seg->seg.tso_frags[i].length =
3181 					tso_frag_len;
3182 				curr_seg->seg.total_len += tso_frag_len;
3183 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3184 				curr_seg->seg.num_frags++;
3185 				skb_proc = skb_proc - tso_frag_len;
3186 
3187 				/* increment the TCP sequence number */
3188 
3189 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3190 				curr_seg->seg.tso_frags[i].paddr =
3191 					tso_frag_paddr;
3192 			}
3193 
3194 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3195 					__func__, __LINE__,
3196 					i,
3197 					tso_frag_len,
3198 					curr_seg->seg.total_len,
3199 					curr_seg->seg.tso_frags[i].vaddr);
3200 
3201 			/* if there is no more data left in the skb */
3202 			if (!skb_proc)
3203 				return tso_info->num_segs;
3204 
3205 			/* get the next payload fragment information */
3206 			/* check if there are more fragments in this segment */
3207 			if (tso_frag_len < tso_seg_size) {
3208 				more_tso_frags = 1;
3209 				if (tso_frag_len != 0) {
3210 					tso_seg_size = tso_seg_size -
3211 						tso_frag_len;
3212 					i++;
3213 					if (curr_seg->seg.num_frags ==
3214 								FRAG_NUM_MAX) {
3215 						more_tso_frags = 0;
3216 						/*
3217 						 * reset i and the tso
3218 						 * payload size
3219 						 */
3220 						i = 1;
3221 						tso_seg_size =
3222 							skb_shinfo(skb)->
3223 								gso_size;
3224 					}
3225 				}
3226 			} else {
3227 				more_tso_frags = 0;
3228 				/* reset i and the tso payload size */
3229 				i = 1;
3230 				tso_seg_size = skb_shinfo(skb)->gso_size;
3231 			}
3232 
3233 			/* if the next fragment is contiguous */
3234 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3235 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3236 				skb_frag_len = skb_frag_len - tso_frag_len;
3237 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3238 
3239 			} else { /* the next fragment is not contiguous */
3240 				if (skb_shinfo(skb)->nr_frags == 0) {
3241 					qdf_info("TSO: nr_frags == 0!");
3242 					qdf_assert(0);
3243 					return 0;
3244 				}
3245 				if (j >= skb_shinfo(skb)->nr_frags) {
3246 					qdf_info("TSO: nr_frags %d j %d",
3247 						 skb_shinfo(skb)->nr_frags, j);
3248 					qdf_assert(0);
3249 					return 0;
3250 				}
3251 				frag = &skb_shinfo(skb)->frags[j];
3252 				skb_frag_len = skb_frag_size(frag);
3253 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3254 				tso_frag_vaddr = skb_frag_address_safe(frag);
3255 				j++;
3256 			}
3257 
3258 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3259 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3260 				tso_seg_size);
3261 
3262 			if (!(tso_frag_vaddr)) {
3263 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3264 						__func__);
3265 				return 0;
3266 			}
3267 
3268 			tso_frag_paddr =
3269 					 dma_map_single(osdev->dev,
3270 						 tso_frag_vaddr,
3271 						 tso_frag_len,
3272 						 DMA_TO_DEVICE);
3273 			if (unlikely(dma_mapping_error(osdev->dev,
3274 							tso_frag_paddr))) {
3275 				qdf_err("DMA mapping error!");
3276 				qdf_assert(0);
3277 				return 0;
3278 			}
3279 		}
3280 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3281 				curr_seg->seg.tso_flags.tcp_seq_num);
3282 		num_seg--;
3283 		/* if TCP FIN flag was set, set it in the last segment */
3284 		if (!num_seg)
3285 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3286 
3287 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3288 		curr_seg = curr_seg->next;
3289 	}
3290 	return tso_info->num_segs;
3291 }
3292 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3293 
3294 /**
3295  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3296  *
3297  * @osdev: qdf device handle
3298  * @tso_seg: TSO segment element to be unmapped
3299  * @is_last_seg: whether this is last tso seg or not
3300  *
3301  * Return: none
3302  */
3303 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3304 			  struct qdf_tso_seg_elem_t *tso_seg,
3305 			  bool is_last_seg)
3306 {
3307 	uint32_t num_frags = 0;
3308 
3309 	if (tso_seg->seg.num_frags > 0)
3310 		num_frags = tso_seg->seg.num_frags - 1;
3311 
3312 	/*Num of frags in a tso seg cannot be less than 2 */
3313 	if (num_frags < 1) {
3314 		/*
3315 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3316 		 * this may happen when qdf_nbuf_get_tso_info failed,
3317 		 * do dma unmap for the 0th frag in this seg.
3318 		 */
3319 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3320 			goto last_seg_free_first_frag;
3321 
3322 		qdf_assert(0);
3323 		qdf_err("ERROR: num of frags in a tso segment is %d",
3324 			(num_frags + 1));
3325 		return;
3326 	}
3327 
3328 	while (num_frags) {
3329 		/*Do dma unmap the tso seg except the 0th frag */
3330 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3331 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3332 				num_frags);
3333 			qdf_assert(0);
3334 			return;
3335 		}
3336 		dma_unmap_single(osdev->dev,
3337 				 tso_seg->seg.tso_frags[num_frags].paddr,
3338 				 tso_seg->seg.tso_frags[num_frags].length,
3339 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3340 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3341 		num_frags--;
3342 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3343 	}
3344 
3345 last_seg_free_first_frag:
3346 	if (is_last_seg) {
3347 		/*Do dma unmap for the tso seg 0th frag */
3348 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3349 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3350 			qdf_assert(0);
3351 			return;
3352 		}
3353 		dma_unmap_single(osdev->dev,
3354 				 tso_seg->seg.tso_frags[0].paddr,
3355 				 tso_seg->seg.tso_frags[0].length,
3356 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3357 		tso_seg->seg.tso_frags[0].paddr = 0;
3358 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3359 	}
3360 }
3361 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3362 
3363 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3364 {
3365 	size_t packet_len;
3366 
3367 	packet_len = skb->len -
3368 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3369 		 tcp_hdrlen(skb));
3370 
3371 	return packet_len;
3372 }
3373 
3374 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
3375 
3376 /**
3377  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3378  * into segments
3379  * @nbuf:   network buffer to be segmented
3380  * @tso_info:  This is the output. The information about the
3381  *      TSO segments will be populated within this.
3382  *
3383  * This function fragments a TCP jumbo packet into smaller
3384  * segments to be transmitted by the driver. It chains the TSO
3385  * segments created into a list.
3386  *
3387  * Return: 0 - success, 1 - failure
3388  */
3389 #ifndef BUILD_X86
3390 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3391 {
3392 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3393 	uint32_t remainder, num_segs = 0;
3394 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3395 	uint8_t frags_per_tso = 0;
3396 	uint32_t skb_frag_len = 0;
3397 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3398 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3399 	skb_frag_t *frag = NULL;
3400 	int j = 0;
3401 	uint32_t temp_num_seg = 0;
3402 
3403 	/* length of the first chunk of data in the skb minus eit header*/
3404 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3405 
3406 	/* Calculate num of segs for skb's first chunk of data*/
3407 	remainder = skb_frag_len % tso_seg_size;
3408 	num_segs = skb_frag_len / tso_seg_size;
3409 	/**
3410 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3411 	 * In that case, one more tso seg is required to accommodate
3412 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3413 	 * then remaining data will be accomodated while doing the calculation
3414 	 * for nr_frags data. Hence, frags_per_tso++.
3415 	 */
3416 	if (remainder) {
3417 		if (!skb_nr_frags)
3418 			num_segs++;
3419 		else
3420 			frags_per_tso++;
3421 	}
3422 
3423 	while (skb_nr_frags) {
3424 		if (j >= skb_shinfo(skb)->nr_frags) {
3425 			qdf_info("TSO: nr_frags %d j %d",
3426 				 skb_shinfo(skb)->nr_frags, j);
3427 			qdf_assert(0);
3428 			return 0;
3429 		}
3430 		/**
3431 		 * Calculate the number of tso seg for nr_frags data:
3432 		 * Get the length of each frag in skb_frag_len, add to
3433 		 * remainder.Get the number of segments by dividing it to
3434 		 * tso_seg_size and calculate the new remainder.
3435 		 * Decrement the nr_frags value and keep
3436 		 * looping all the skb_fragments.
3437 		 */
3438 		frag = &skb_shinfo(skb)->frags[j];
3439 		skb_frag_len = skb_frag_size(frag);
3440 		temp_num_seg = num_segs;
3441 		remainder += skb_frag_len;
3442 		num_segs += remainder / tso_seg_size;
3443 		remainder = remainder % tso_seg_size;
3444 		skb_nr_frags--;
3445 		if (remainder) {
3446 			if (num_segs > temp_num_seg)
3447 				frags_per_tso = 0;
3448 			/**
3449 			 * increment the tso per frags whenever remainder is
3450 			 * positive. If frags_per_tso reaches the (max-1),
3451 			 * [First frags always have EIT header, therefore max-1]
3452 			 * increment the num_segs as no more data can be
3453 			 * accomodated in the curr tso seg. Reset the remainder
3454 			 * and frags per tso and keep looping.
3455 			 */
3456 			frags_per_tso++;
3457 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3458 				num_segs++;
3459 				frags_per_tso = 0;
3460 				remainder = 0;
3461 			}
3462 			/**
3463 			 * If this is the last skb frag and still remainder is
3464 			 * non-zero(frags_per_tso is not reached to the max-1)
3465 			 * then increment the num_segs to take care of the
3466 			 * remaining length.
3467 			 */
3468 			if (!skb_nr_frags && remainder) {
3469 				num_segs++;
3470 				frags_per_tso = 0;
3471 			}
3472 		} else {
3473 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3474 			frags_per_tso = 0;
3475 		}
3476 		j++;
3477 	}
3478 
3479 	return num_segs;
3480 }
3481 #elif !defined(QCA_WIFI_QCN9000)
3482 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3483 {
3484 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3485 	skb_frag_t *frag = NULL;
3486 
3487 	/*
3488 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3489 	 * region which cannot be accessed by Target
3490 	 */
3491 	if (virt_to_phys(skb->data) < 0x50000040) {
3492 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3493 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3494 				virt_to_phys(skb->data));
3495 		goto fail;
3496 
3497 	}
3498 
3499 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3500 		frag = &skb_shinfo(skb)->frags[i];
3501 
3502 		if (!frag)
3503 			goto fail;
3504 
3505 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3506 			goto fail;
3507 	}
3508 
3509 
3510 	gso_size = skb_shinfo(skb)->gso_size;
3511 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3512 			+ tcp_hdrlen(skb));
3513 	while (tmp_len) {
3514 		num_segs++;
3515 		if (tmp_len > gso_size)
3516 			tmp_len -= gso_size;
3517 		else
3518 			break;
3519 	}
3520 
3521 	return num_segs;
3522 
3523 	/*
3524 	 * Do not free this frame, just do socket level accounting
3525 	 * so that this is not reused.
3526 	 */
3527 fail:
3528 	if (skb->sk)
3529 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3530 
3531 	return 0;
3532 }
3533 #else
3534 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3535 {
3536 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3537 	skb_frag_t *frag = NULL;
3538 
3539 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3540 		frag = &skb_shinfo(skb)->frags[i];
3541 
3542 		if (!frag)
3543 			goto fail;
3544 	}
3545 
3546 	gso_size = skb_shinfo(skb)->gso_size;
3547 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3548 			+ tcp_hdrlen(skb));
3549 	while (tmp_len) {
3550 		num_segs++;
3551 		if (tmp_len > gso_size)
3552 			tmp_len -= gso_size;
3553 		else
3554 			break;
3555 	}
3556 
3557 	return num_segs;
3558 
3559 	/*
3560 	 * Do not free this frame, just do socket level accounting
3561 	 * so that this is not reused.
3562 	 */
3563 fail:
3564 	if (skb->sk)
3565 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3566 
3567 	return 0;
3568 }
3569 #endif
3570 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3571 
3572 #endif /* FEATURE_TSO */
3573 
3574 /**
3575  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3576  *
3577  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3578  *
3579  * Return: N/A
3580  */
3581 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3582 			  uint32_t *lo, uint32_t *hi)
3583 {
3584 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3585 		*lo = lower_32_bits(dmaaddr);
3586 		*hi = upper_32_bits(dmaaddr);
3587 	} else {
3588 		*lo = dmaaddr;
3589 		*hi = 0;
3590 	}
3591 }
3592 
3593 qdf_export_symbol(__qdf_dmaaddr_to_32s);
3594 
3595 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3596 {
3597 	qdf_nbuf_users_inc(&skb->users);
3598 	return skb;
3599 }
3600 qdf_export_symbol(__qdf_nbuf_inc_users);
3601 
3602 int __qdf_nbuf_get_users(struct sk_buff *skb)
3603 {
3604 	return qdf_nbuf_users_read(&skb->users);
3605 }
3606 qdf_export_symbol(__qdf_nbuf_get_users);
3607 
3608 /**
3609  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3610  * @skb: sk_buff handle
3611  *
3612  * Return: none
3613  */
3614 
3615 void __qdf_nbuf_ref(struct sk_buff *skb)
3616 {
3617 	skb_get(skb);
3618 }
3619 qdf_export_symbol(__qdf_nbuf_ref);
3620 
3621 /**
3622  * __qdf_nbuf_shared() - Check whether the buffer is shared
3623  *  @skb: sk_buff buffer
3624  *
3625  *  Return: true if more than one person has a reference to this buffer.
3626  */
3627 int __qdf_nbuf_shared(struct sk_buff *skb)
3628 {
3629 	return skb_shared(skb);
3630 }
3631 qdf_export_symbol(__qdf_nbuf_shared);
3632 
3633 /**
3634  * __qdf_nbuf_dmamap_create() - create a DMA map.
3635  * @osdev: qdf device handle
3636  * @dmap: dma map handle
3637  *
3638  * This can later be used to map networking buffers. They :
3639  * - need space in adf_drv's software descriptor
3640  * - are typically created during adf_drv_create
3641  * - need to be created before any API(qdf_nbuf_map) that uses them
3642  *
3643  * Return: QDF STATUS
3644  */
3645 QDF_STATUS
3646 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3647 {
3648 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3649 	/*
3650 	 * driver can tell its SG capablity, it must be handled.
3651 	 * Bounce buffers if they are there
3652 	 */
3653 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3654 	if (!(*dmap))
3655 		error = QDF_STATUS_E_NOMEM;
3656 
3657 	return error;
3658 }
3659 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3660 /**
3661  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3662  * @osdev: qdf device handle
3663  * @dmap: dma map handle
3664  *
3665  * Return: none
3666  */
3667 void
3668 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3669 {
3670 	kfree(dmap);
3671 }
3672 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3673 
3674 /**
3675  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3676  * @osdev: os device
3677  * @skb: skb handle
3678  * @dir: dma direction
3679  * @nbytes: number of bytes to be mapped
3680  *
3681  * Return: QDF_STATUS
3682  */
3683 #ifdef QDF_OS_DEBUG
3684 QDF_STATUS
3685 __qdf_nbuf_map_nbytes(
3686 	qdf_device_t osdev,
3687 	struct sk_buff *skb,
3688 	qdf_dma_dir_t dir,
3689 	int nbytes)
3690 {
3691 	struct skb_shared_info  *sh = skb_shinfo(skb);
3692 
3693 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3694 
3695 	/*
3696 	 * Assume there's only a single fragment.
3697 	 * To support multiple fragments, it would be necessary to change
3698 	 * adf_nbuf_t to be a separate object that stores meta-info
3699 	 * (including the bus address for each fragment) and a pointer
3700 	 * to the underlying sk_buff.
3701 	 */
3702 	qdf_assert(sh->nr_frags == 0);
3703 
3704 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3705 }
3706 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3707 #else
3708 QDF_STATUS
3709 __qdf_nbuf_map_nbytes(
3710 	qdf_device_t osdev,
3711 	struct sk_buff *skb,
3712 	qdf_dma_dir_t dir,
3713 	int nbytes)
3714 {
3715 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3716 }
3717 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3718 #endif
3719 /**
3720  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3721  * @osdev: OS device
3722  * @skb: skb handle
3723  * @dir: direction
3724  * @nbytes: number of bytes
3725  *
3726  * Return: none
3727  */
3728 void
3729 __qdf_nbuf_unmap_nbytes(
3730 	qdf_device_t osdev,
3731 	struct sk_buff *skb,
3732 	qdf_dma_dir_t dir,
3733 	int nbytes)
3734 {
3735 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3736 
3737 	/*
3738 	 * Assume there's a single fragment.
3739 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3740 	 */
3741 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3742 }
3743 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3744 
3745 /**
3746  * __qdf_nbuf_dma_map_info() - return the dma map info
3747  * @bmap: dma map
3748  * @sg: dma map info
3749  *
3750  * Return: none
3751  */
3752 void
3753 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3754 {
3755 	qdf_assert(bmap->mapped);
3756 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3757 
3758 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3759 			sizeof(struct __qdf_segment));
3760 	sg->nsegs = bmap->nsegs;
3761 }
3762 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3763 /**
3764  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3765  *			specified by the index
3766  * @skb: sk buff
3767  * @sg: scatter/gather list of all the frags
3768  *
3769  * Return: none
3770  */
3771 #if defined(__QDF_SUPPORT_FRAG_MEM)
3772 void
3773 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3774 {
3775 	qdf_assert(skb);
3776 	sg->sg_segs[0].vaddr = skb->data;
3777 	sg->sg_segs[0].len   = skb->len;
3778 	sg->nsegs            = 1;
3779 
3780 	for (int i = 1; i <= sh->nr_frags; i++) {
3781 		skb_frag_t    *f        = &sh->frags[i - 1];
3782 
3783 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3784 			f->page_offset);
3785 		sg->sg_segs[i].len      = f->size;
3786 
3787 		qdf_assert(i < QDF_MAX_SGLIST);
3788 	}
3789 	sg->nsegs += i;
3790 
3791 }
3792 qdf_export_symbol(__qdf_nbuf_frag_info);
3793 #else
3794 #ifdef QDF_OS_DEBUG
3795 void
3796 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3797 {
3798 
3799 	struct skb_shared_info  *sh = skb_shinfo(skb);
3800 
3801 	qdf_assert(skb);
3802 	sg->sg_segs[0].vaddr = skb->data;
3803 	sg->sg_segs[0].len   = skb->len;
3804 	sg->nsegs            = 1;
3805 
3806 	qdf_assert(sh->nr_frags == 0);
3807 }
3808 qdf_export_symbol(__qdf_nbuf_frag_info);
3809 #else
3810 void
3811 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3812 {
3813 	sg->sg_segs[0].vaddr = skb->data;
3814 	sg->sg_segs[0].len   = skb->len;
3815 	sg->nsegs            = 1;
3816 }
3817 qdf_export_symbol(__qdf_nbuf_frag_info);
3818 #endif
3819 #endif
3820 /**
3821  * __qdf_nbuf_get_frag_size() - get frag size
3822  * @nbuf: sk buffer
3823  * @cur_frag: current frag
3824  *
3825  * Return: frag size
3826  */
3827 uint32_t
3828 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3829 {
3830 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3831 	const skb_frag_t *frag = sh->frags + cur_frag;
3832 
3833 	return skb_frag_size(frag);
3834 }
3835 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3836 
3837 /**
3838  * __qdf_nbuf_frag_map() - dma map frag
3839  * @osdev: os device
3840  * @nbuf: sk buff
3841  * @offset: offset
3842  * @dir: direction
3843  * @cur_frag: current fragment
3844  *
3845  * Return: QDF status
3846  */
3847 #ifdef A_SIMOS_DEVHOST
3848 QDF_STATUS __qdf_nbuf_frag_map(
3849 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3850 	int offset, qdf_dma_dir_t dir, int cur_frag)
3851 {
3852 	int32_t paddr, frag_len;
3853 
3854 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3855 	return QDF_STATUS_SUCCESS;
3856 }
3857 qdf_export_symbol(__qdf_nbuf_frag_map);
3858 #else
3859 QDF_STATUS __qdf_nbuf_frag_map(
3860 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3861 	int offset, qdf_dma_dir_t dir, int cur_frag)
3862 {
3863 	dma_addr_t paddr, frag_len;
3864 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3865 	const skb_frag_t *frag = sh->frags + cur_frag;
3866 
3867 	frag_len = skb_frag_size(frag);
3868 
3869 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3870 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3871 					__qdf_dma_dir_to_os(dir));
3872 	return dma_mapping_error(osdev->dev, paddr) ?
3873 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3874 }
3875 qdf_export_symbol(__qdf_nbuf_frag_map);
3876 #endif
3877 /**
3878  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3879  * @dmap: dma map
3880  * @cb: callback
3881  * @arg: argument
3882  *
3883  * Return: none
3884  */
3885 void
3886 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3887 {
3888 	return;
3889 }
3890 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3891 
3892 
3893 /**
3894  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3895  * @osdev: os device
3896  * @buf: sk buff
3897  * @dir: direction
3898  *
3899  * Return: none
3900  */
3901 #if defined(A_SIMOS_DEVHOST)
3902 static void __qdf_nbuf_sync_single_for_cpu(
3903 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3904 {
3905 	return;
3906 }
3907 #else
3908 static void __qdf_nbuf_sync_single_for_cpu(
3909 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3910 {
3911 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3912 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3913 		return;
3914 	}
3915 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3916 		skb_end_offset(buf) - skb_headroom(buf),
3917 		__qdf_dma_dir_to_os(dir));
3918 }
3919 #endif
3920 /**
3921  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3922  * @osdev: os device
3923  * @skb: sk buff
3924  * @dir: direction
3925  *
3926  * Return: none
3927  */
3928 void
3929 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3930 	struct sk_buff *skb, qdf_dma_dir_t dir)
3931 {
3932 	qdf_assert(
3933 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3934 
3935 	/*
3936 	 * Assume there's a single fragment.
3937 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3938 	 */
3939 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3940 }
3941 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3942 
3943 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3944 /**
3945  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3946  * @rx_status: Pointer to rx_status.
3947  * @rtap_buf: Buf to which VHT info has to be updated.
3948  * @rtap_len: Current length of radiotap buffer
3949  *
3950  * Return: Length of radiotap after VHT flags updated.
3951  */
3952 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3953 					struct mon_rx_status *rx_status,
3954 					int8_t *rtap_buf,
3955 					uint32_t rtap_len)
3956 {
3957 	uint16_t vht_flags = 0;
3958 
3959 	rtap_len = qdf_align(rtap_len, 2);
3960 
3961 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3962 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3963 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3964 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3965 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3966 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3967 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3968 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3969 	rtap_len += 2;
3970 
3971 	rtap_buf[rtap_len] |=
3972 		(rx_status->is_stbc ?
3973 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3974 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3975 		(rx_status->ldpc ?
3976 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3977 		(rx_status->beamformed ?
3978 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3979 	rtap_len += 1;
3980 	switch (rx_status->vht_flag_values2) {
3981 	case IEEE80211_RADIOTAP_VHT_BW_20:
3982 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3983 		break;
3984 	case IEEE80211_RADIOTAP_VHT_BW_40:
3985 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3986 		break;
3987 	case IEEE80211_RADIOTAP_VHT_BW_80:
3988 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3989 		break;
3990 	case IEEE80211_RADIOTAP_VHT_BW_160:
3991 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3992 		break;
3993 	}
3994 	rtap_len += 1;
3995 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3996 	rtap_len += 1;
3997 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3998 	rtap_len += 1;
3999 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4000 	rtap_len += 1;
4001 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4002 	rtap_len += 1;
4003 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4004 	rtap_len += 1;
4005 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4006 	rtap_len += 1;
4007 	put_unaligned_le16(rx_status->vht_flag_values6,
4008 			   &rtap_buf[rtap_len]);
4009 	rtap_len += 2;
4010 
4011 	return rtap_len;
4012 }
4013 
4014 /**
4015  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4016  * @rx_status: Pointer to rx_status.
4017  * @rtap_buf: buffer to which radiotap has to be updated
4018  * @rtap_len: radiotap length
4019  *
4020  * API update high-efficiency (11ax) fields in the radiotap header
4021  *
4022  * Return: length of rtap_len updated.
4023  */
4024 static unsigned int
4025 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4026 				     int8_t *rtap_buf, uint32_t rtap_len)
4027 {
4028 	/*
4029 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4030 	 * Enable all "known" HE radiotap flags for now
4031 	 */
4032 	rtap_len = qdf_align(rtap_len, 2);
4033 
4034 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4035 	rtap_len += 2;
4036 
4037 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4038 	rtap_len += 2;
4039 
4040 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4041 	rtap_len += 2;
4042 
4043 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4044 	rtap_len += 2;
4045 
4046 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4047 	rtap_len += 2;
4048 
4049 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4050 	rtap_len += 2;
4051 	qdf_rl_debug("he data %x %x %x %x %x %x",
4052 		     rx_status->he_data1,
4053 		     rx_status->he_data2, rx_status->he_data3,
4054 		     rx_status->he_data4, rx_status->he_data5,
4055 		     rx_status->he_data6);
4056 	return rtap_len;
4057 }
4058 
4059 
4060 /**
4061  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4062  * @rx_status: Pointer to rx_status.
4063  * @rtap_buf: buffer to which radiotap has to be updated
4064  * @rtap_len: radiotap length
4065  *
4066  * API update HE-MU fields in the radiotap header
4067  *
4068  * Return: length of rtap_len updated.
4069  */
4070 static unsigned int
4071 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4072 				     int8_t *rtap_buf, uint32_t rtap_len)
4073 {
4074 	rtap_len = qdf_align(rtap_len, 2);
4075 
4076 	/*
4077 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4078 	 * Enable all "known" he-mu radiotap flags for now
4079 	 */
4080 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4081 	rtap_len += 2;
4082 
4083 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4084 	rtap_len += 2;
4085 
4086 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4087 	rtap_len += 1;
4088 
4089 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4090 	rtap_len += 1;
4091 
4092 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4093 	rtap_len += 1;
4094 
4095 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4096 	rtap_len += 1;
4097 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4098 		  rx_status->he_flags1,
4099 		  rx_status->he_flags2, rx_status->he_RU[0],
4100 		  rx_status->he_RU[1], rx_status->he_RU[2],
4101 		  rx_status->he_RU[3]);
4102 
4103 	return rtap_len;
4104 }
4105 
4106 /**
4107  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4108  * @rx_status: Pointer to rx_status.
4109  * @rtap_buf: buffer to which radiotap has to be updated
4110  * @rtap_len: radiotap length
4111  *
4112  * API update he-mu-other fields in the radiotap header
4113  *
4114  * Return: length of rtap_len updated.
4115  */
4116 static unsigned int
4117 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4118 				     int8_t *rtap_buf, uint32_t rtap_len)
4119 {
4120 	rtap_len = qdf_align(rtap_len, 2);
4121 
4122 	/*
4123 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4124 	 * Enable all "known" he-mu-other radiotap flags for now
4125 	 */
4126 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4127 	rtap_len += 2;
4128 
4129 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4130 	rtap_len += 2;
4131 
4132 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4133 	rtap_len += 1;
4134 
4135 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4136 	rtap_len += 1;
4137 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4138 		  rx_status->he_per_user_1,
4139 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4140 		  rx_status->he_per_user_known);
4141 	return rtap_len;
4142 }
4143 
4144 #define IEEE80211_RADIOTAP_TX_STATUS 0
4145 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
4146 
4147 /**
4148  * This is the length for radiotap, combined length
4149  * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
4150  * cannot be more than available headroom_sz.
4151  * increase this when we add more radiotap elements.
4152  * Number after '+' indicates maximum possible increase due to alignment
4153  */
4154 
4155 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4156 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4157 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4158 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4159 #define RADIOTAP_FIXED_HEADER_LEN 17
4160 #define RADIOTAP_HT_FLAGS_LEN 3
4161 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4162 #define RADIOTAP_VENDOR_NS_LEN \
4163 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4164 /* This is Radio Tap Header Extension Length.
4165  * 4 Bytes for Extended it_present bit map +
4166  * 4 bytes padding for alignment
4167  */
4168 #define RADIOTAP_HEADER_EXT_LEN (2 * sizeof(uint32_t))
4169 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4170 				RADIOTAP_FIXED_HEADER_LEN + \
4171 				RADIOTAP_HT_FLAGS_LEN + \
4172 				RADIOTAP_VHT_FLAGS_LEN + \
4173 				RADIOTAP_AMPDU_STATUS_LEN + \
4174 				RADIOTAP_HE_FLAGS_LEN + \
4175 				RADIOTAP_HE_MU_FLAGS_LEN + \
4176 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4177 				RADIOTAP_VENDOR_NS_LEN + \
4178 				RADIOTAP_HEADER_EXT_LEN)
4179 
4180 #define IEEE80211_RADIOTAP_HE 23
4181 #define IEEE80211_RADIOTAP_HE_MU	24
4182 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4183 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4184 
4185 /**
4186  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4187  * @rx_status: Pointer to rx_status.
4188  * @rtap_buf: Buf to which AMPDU info has to be updated.
4189  * @rtap_len: Current length of radiotap buffer
4190  *
4191  * Return: Length of radiotap after AMPDU flags updated.
4192  */
4193 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4194 					struct mon_rx_status *rx_status,
4195 					uint8_t *rtap_buf,
4196 					uint32_t rtap_len)
4197 {
4198 	/*
4199 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4200 	 * First 32 bits of AMPDU represents the reference number
4201 	 */
4202 
4203 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4204 	uint16_t ampdu_flags = 0;
4205 	uint16_t ampdu_reserved_flags = 0;
4206 
4207 	rtap_len = qdf_align(rtap_len, 4);
4208 
4209 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4210 	rtap_len += 4;
4211 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4212 	rtap_len += 2;
4213 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4214 	rtap_len += 2;
4215 
4216 	return rtap_len;
4217 }
4218 
4219 /**
4220  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4221  * @rx_status: Pointer to rx_status.
4222  * @nbuf:      nbuf pointer to which radiotap has to be updated
4223  * @headroom_sz: Available headroom size.
4224  *
4225  * Return: length of rtap_len updated.
4226  */
4227 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4228 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4229 {
4230 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4231 	struct ieee80211_radiotap_header *rthdr =
4232 		(struct ieee80211_radiotap_header *)rtap_buf;
4233 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4234 	uint32_t rtap_len = rtap_hdr_len;
4235 	uint8_t length = rtap_len;
4236 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4237 	uint32_t *rtap_ext = NULL;
4238 
4239 	/* Adding Extended Header space */
4240 	if (rx_status->add_rtap_ext) {
4241 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
4242 		rtap_len = rtap_hdr_len;
4243 	}
4244 	length = rtap_len;
4245 
4246 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4247 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4248 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4249 	rtap_len += 8;
4250 
4251 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4252 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4253 
4254 	if (rx_status->rs_fcs_err)
4255 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4256 
4257 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4258 	rtap_len += 1;
4259 
4260 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4261 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4262 	    !rx_status->he_flags) {
4263 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4264 		rtap_buf[rtap_len] = rx_status->rate;
4265 	} else
4266 		rtap_buf[rtap_len] = 0;
4267 	rtap_len += 1;
4268 
4269 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4270 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4271 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4272 	rtap_len += 2;
4273 	/* Channel flags. */
4274 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
4275 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4276 	else
4277 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4278 	if (rx_status->cck_flag)
4279 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4280 	if (rx_status->ofdm_flag)
4281 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4282 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4283 	rtap_len += 2;
4284 
4285 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4286 	 *					(dBm)
4287 	 */
4288 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4289 	/*
4290 	 * rssi_comb is int dB, need to convert it to dBm.
4291 	 * normalize value to noise floor of -96 dBm
4292 	 */
4293 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4294 	rtap_len += 1;
4295 
4296 	/* RX signal noise floor */
4297 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4298 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4299 	rtap_len += 1;
4300 
4301 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4302 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4303 	rtap_buf[rtap_len] = rx_status->nr_ant;
4304 	rtap_len += 1;
4305 
4306 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4307 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4308 		return 0;
4309 	}
4310 
4311 	if (rx_status->ht_flags) {
4312 		length = rtap_len;
4313 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
4314 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4315 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4316 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4317 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4318 		rtap_len += 1;
4319 
4320 		if (rx_status->sgi)
4321 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4322 		if (rx_status->bw)
4323 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4324 		else
4325 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4326 		rtap_len += 1;
4327 
4328 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4329 		rtap_len += 1;
4330 
4331 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4332 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4333 			return 0;
4334 		}
4335 	}
4336 
4337 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4338 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4339 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4340 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4341 								rtap_buf,
4342 								rtap_len);
4343 	}
4344 
4345 	if (rx_status->vht_flags) {
4346 		length = rtap_len;
4347 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4348 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4349 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4350 								rtap_buf,
4351 								rtap_len);
4352 
4353 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4354 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4355 			return 0;
4356 		}
4357 	}
4358 
4359 	if (rx_status->he_flags) {
4360 		length = rtap_len;
4361 		/* IEEE80211_RADIOTAP_HE */
4362 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4363 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4364 								rtap_buf,
4365 								rtap_len);
4366 
4367 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4368 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4369 			return 0;
4370 		}
4371 	}
4372 
4373 	if (rx_status->he_mu_flags) {
4374 		length = rtap_len;
4375 		/* IEEE80211_RADIOTAP_HE-MU */
4376 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4377 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4378 								rtap_buf,
4379 								rtap_len);
4380 
4381 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4382 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4383 			return 0;
4384 		}
4385 	}
4386 
4387 	if (rx_status->he_mu_other_flags) {
4388 		length = rtap_len;
4389 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4390 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4391 		rtap_len =
4392 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4393 								rtap_buf,
4394 								rtap_len);
4395 
4396 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4397 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4398 			return 0;
4399 		}
4400 	}
4401 
4402 	rtap_len = qdf_align(rtap_len, 2);
4403 	/*
4404 	 * Radiotap Vendor Namespace
4405 	 */
4406 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4407 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4408 					(rtap_buf + rtap_len);
4409 	/*
4410 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4411 	 */
4412 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4413 	/*
4414 	 * Name space selector = 0
4415 	 * We only will have one namespace for now
4416 	 */
4417 	radiotap_vendor_ns_ath->hdr.selector = 0;
4418 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4419 					sizeof(*radiotap_vendor_ns_ath) -
4420 					sizeof(radiotap_vendor_ns_ath->hdr));
4421 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4422 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4423 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4424 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4425 				cpu_to_le32(rx_status->ppdu_timestamp);
4426 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4427 
4428 	/* Add Extension to Radiotap Header & corresponding data */
4429 	if (rx_status->add_rtap_ext) {
4430 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXT);
4431 		rtap_ext = (uint32_t *)&rthdr->it_present;
4432 		rtap_ext++;
4433 		*rtap_ext = cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_STATUS);
4434 		*rtap_ext |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RETRY_COUNT);
4435 
4436 		rtap_buf[rtap_len] = rx_status->tx_status;
4437 		rtap_len += 1;
4438 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
4439 		rtap_len += 1;
4440 	}
4441 
4442 	rthdr->it_len = cpu_to_le16(rtap_len);
4443 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4444 
4445 	if (headroom_sz < rtap_len) {
4446 		qdf_err("ERROR: not enough space to update radiotap");
4447 		return 0;
4448 	}
4449 	qdf_nbuf_push_head(nbuf, rtap_len);
4450 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4451 	return rtap_len;
4452 }
4453 #else
4454 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4455 					struct mon_rx_status *rx_status,
4456 					int8_t *rtap_buf,
4457 					uint32_t rtap_len)
4458 {
4459 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4460 	return 0;
4461 }
4462 
4463 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4464 				      int8_t *rtap_buf, uint32_t rtap_len)
4465 {
4466 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4467 	return 0;
4468 }
4469 
4470 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4471 					struct mon_rx_status *rx_status,
4472 					uint8_t *rtap_buf,
4473 					uint32_t rtap_len)
4474 {
4475 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4476 	return 0;
4477 }
4478 
4479 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4480 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4481 {
4482 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4483 	return 0;
4484 }
4485 #endif
4486 qdf_export_symbol(qdf_nbuf_update_radiotap);
4487 
4488 /**
4489  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4490  * @cb_func_ptr: function pointer to the nbuf free callback
4491  *
4492  * This function registers a callback function for nbuf free.
4493  *
4494  * Return: none
4495  */
4496 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4497 {
4498 	nbuf_free_cb = cb_func_ptr;
4499 }
4500 
4501 /**
4502  * qdf_nbuf_classify_pkt() - classify packet
4503  * @skb - sk buff
4504  *
4505  * Return: none
4506  */
4507 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4508 {
4509 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4510 
4511 	/* check destination mac address is broadcast/multicast */
4512 	if (is_broadcast_ether_addr((uint8_t *)eh))
4513 		QDF_NBUF_CB_SET_BCAST(skb);
4514 	else if (is_multicast_ether_addr((uint8_t *)eh))
4515 		QDF_NBUF_CB_SET_MCAST(skb);
4516 
4517 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4518 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4519 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4520 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4521 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4522 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4523 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4524 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4525 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4526 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4527 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4528 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4529 }
4530 qdf_export_symbol(qdf_nbuf_classify_pkt);
4531 
4532 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4533 {
4534 	qdf_nbuf_users_set(&nbuf->users, 1);
4535 	nbuf->data = nbuf->head + NET_SKB_PAD;
4536 	skb_reset_tail_pointer(nbuf);
4537 }
4538 qdf_export_symbol(__qdf_nbuf_init);
4539 
4540 #ifdef WLAN_FEATURE_FASTPATH
4541 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4542 {
4543 	qdf_nbuf_users_set(&nbuf->users, 1);
4544 	nbuf->data = nbuf->head + NET_SKB_PAD;
4545 	skb_reset_tail_pointer(nbuf);
4546 }
4547 qdf_export_symbol(qdf_nbuf_init_fast);
4548 #endif /* WLAN_FEATURE_FASTPATH */
4549 
4550 
4551 #ifdef QDF_NBUF_GLOBAL_COUNT
4552 /**
4553  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
4554  *
4555  * Return void
4556  */
4557 void __qdf_nbuf_mod_init(void)
4558 {
4559 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
4560 	qdf_atomic_init(&nbuf_count);
4561 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4562 }
4563 
4564 /**
4565  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
4566  *
4567  * Return void
4568  */
4569 void __qdf_nbuf_mod_exit(void)
4570 {
4571 }
4572 #endif
4573