xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision a32b831887c9adafcbdfd4f312e90624082a4809)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
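
/*
 * Illustrative sketch: the wrappers above exist so nbuf reference counting
 * compiles against both pre-4.13 kernels (atomic_t skb->users) and newer
 * kernels (refcount_t). The QDF_NBUF_DOC_EXAMPLES guard is hypothetical and
 * never defined, so this sketch is not built.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_users_example(struct sk_buff *skb)
{
	/* take an extra reference, then drop it again */
	qdf_nbuf_users_inc(&skb->users);
	if (qdf_nbuf_users_read(&skb->users) > 1)
		qdf_nbuf_users_dec(&skb->users);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */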
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define FREQ_MULTIPLIER_CONST_5MHZ 5
88 #define FREQ_MULTIPLIER_CONST_20MHZ 20
89 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
90 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
91 #define RADIOTAP_CCK_CHANNEL 0x0020
92 #define RADIOTAP_OFDM_CHANNEL 0x0040
93 
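/*
 * Illustrative sketch of the standard channel-number-to-frequency mapping
 * these constants encode; the driver's own conversion (used when building
 * radiotap headers) lives further down this file. Hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined, so this is not built.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static uint16_t qdf_nbuf_example_chan_to_freq(uint32_t chan)
{
	if (chan == CHANNEL_NUM_14)
		return CHANNEL_FREQ_2484;
	if (chan < CHANNEL_NUM_14)	/* 2.4 GHz channels 1..13 */
		return CHANNEL_FREQ_2407 + chan * FREQ_MULTIPLIER_CONST_5MHZ;
	if (chan < CHANNEL_NUM_27)	/* 2.4 GHz extension channels */
		return CHANNEL_FREQ_2512 +
			((chan - CHANNEL_NUM_15) * FREQ_MULTIPLIER_CONST_20MHZ);
	if (chan >= CHANNEL_NUM_182 && chan <= CHANNEL_NUM_197) /* 4.9 GHz */
		return CHANNEL_FREQ_4000 + chan * FREQ_MULTIPLIER_CONST_5MHZ;
	return CHANNEL_FREQ_5000 + chan * FREQ_MULTIPLIER_CONST_5MHZ; /* 5 GHz */
}
#endif /* QDF_NBUF_DOC_EXAMPLES */
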
94 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
95 #include <qdf_mc_timer.h>
96 
97 struct qdf_track_timer {
98 	qdf_mc_timer_t track_timer;
99 	qdf_atomic_t alloc_fail_cnt;
100 };
101 
102 static struct qdf_track_timer alloc_track_timer;
103 
104 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
105 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
106 #endif
107 
108 /* Packet Counter */
109 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
110 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
111 #ifdef QDF_NBUF_GLOBAL_COUNT
112 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
113 static qdf_atomic_t nbuf_count;
114 #endif
115 
116 /**
117  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
118  *
119  * Return: none
120  */
121 void qdf_nbuf_tx_desc_count_display(void)
122 {
123 	qdf_debug("Current Snapshot of the Driver:");
124 	qdf_debug("Data Packets:");
125 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
126 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
127 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
129 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
136 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
144 	qdf_debug("Mgmt Packets:");
145 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
146 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
147 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
159 }
160 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
161 
162 /**
163  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
164  * @packet_type: packet type, either mgmt or data
165  * @current_state: layer at which the packet is currently present
166  *
167  * Return: none
168  */
169 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
170 			uint8_t current_state)
171 {
172 	switch (packet_type) {
173 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
174 		nbuf_tx_mgmt[current_state]++;
175 		break;
176 	case QDF_NBUF_TX_PKT_DATA_TRACK:
177 		nbuf_tx_data[current_state]++;
178 		break;
179 	default:
180 		break;
181 	}
182 }
183 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
184 
185 /**
186  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
187  *
188  * Return: none
189  */
190 void qdf_nbuf_tx_desc_count_clear(void)
191 {
192 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
193 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
194 }
195 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
196 
197 /**
198  * qdf_nbuf_set_state() - Updates the packet state
199  * @nbuf: network buffer
200  * @current_state: layer at which the packet currently is
201  *
202  * This function updates the packet state to the layer at which the packet
203  * currently is
204  *
205  * Return: none
206  */
207 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
208 {
209 	/*
210 	 * Only Mgmt, Data Packets are tracked. WMI messages
211 	 * such as scan commands are not tracked
212 	 */
213 	uint8_t packet_type;
214 
215 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
216 
217 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
218 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
219 		return;
220 	}
221 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
222 	qdf_nbuf_tx_desc_count_update(packet_type,
223 					current_state);
224 }
225 qdf_export_symbol(qdf_nbuf_set_state);
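
/*
 * Illustrative sketch of how a TX layer tags a tracked packet: the caller
 * sets the track type once, then updates the state at each layer boundary.
 * Hypothetical QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_track_tx(qdf_nbuf_t nbuf)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */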
226 
227 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
228 /**
229  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
230  *
231  * This function starts the alloc fail replenish timer.
232  *
233  * Return: void
234  */
235 static void __qdf_nbuf_start_replenish_timer(void)
236 {
237 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
238 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
239 	    QDF_TIMER_STATE_RUNNING)
240 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
241 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
242 }
243 
244 /**
245  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
246  *
247  * This function stops the alloc fail replenish timer.
248  *
249  * Return: void
250  */
251 static void __qdf_nbuf_stop_replenish_timer(void)
252 {
253 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
254 		return;
255 
256 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
257 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
258 	    QDF_TIMER_STATE_RUNNING)
259 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
260 }
261 
262 /**
263  * qdf_replenish_expire_handler() - Replenish expire handler
264  * @arg: opaque timer argument (unused)
265  * This function is invoked when the alloc fail replenish timer expires.
266  *
267  * Return: void
268  */
269 static void qdf_replenish_expire_handler(void *arg)
270 {
271 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
272 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
273 		qdf_print("ERROR: NBUF allocation timer expired, fail count %d",
274 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
275 
276 		/* Error handling here */
277 	}
278 }
279 
280 /**
281  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
282  *
283  * This function initializes the nbuf alloc fail replenish timer.
284  *
285  * Return: void
286  */
287 void __qdf_nbuf_init_replenish_timer(void)
288 {
289 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
290 			  qdf_replenish_expire_handler, NULL);
291 }
292 
293 /**
294  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
295  *
296  * This function deinitializes the nbuf alloc fail replenish timer.
297  *
298  * Return: void
299  */
300 void __qdf_nbuf_deinit_replenish_timer(void)
301 {
302 	__qdf_nbuf_stop_replenish_timer();
303 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
304 }
305 #else
306 
307 static inline void __qdf_nbuf_start_replenish_timer(void) {}
308 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
309 #endif
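
/*
 * Illustrative sketch of the replenish timer lifecycle when
 * FEATURE_NBUFF_REPLENISH_TIMER is enabled: init once at attach, let the
 * alloc path start/stop the timer on failure/success, deinit at detach.
 * Hypothetical QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_replenish_lifecycle(void)
{
	__qdf_nbuf_init_replenish_timer();
	/* ... driver runs; __qdf_nbuf_alloc() below starts the timer on
	 * each failed allocation and stops it on the next success ...
	 */
	__qdf_nbuf_deinit_replenish_timer();
}
#endif /* QDF_NBUF_DOC_EXAMPLES */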
310 
311 /* globals do not need to be initialized to NULL/0 */
312 qdf_nbuf_trace_update_t qdf_trace_update_cb;
313 qdf_nbuf_free_t nbuf_free_cb;
314 
315 #ifdef QDF_NBUF_GLOBAL_COUNT
316 
317 /**
318  * __qdf_nbuf_count_get() - get nbuf global count
319  *
320  * Return: nbuf global count
321  */
322 int __qdf_nbuf_count_get(void)
323 {
324 	return qdf_atomic_read(&nbuf_count);
325 }
326 qdf_export_symbol(__qdf_nbuf_count_get);
327 
328 /**
329  * __qdf_nbuf_count_inc() - increment nbuf global count
330  *
331  * @nbuf: network buffer
332  *
333  * Return: void
334  */
335 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
336 {
337 	int num_nbuf = 1;
338 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(nbuf);
339 
340 	/* Take care to account for frag_list */
341 	while (ext_list) {
342 		++num_nbuf;
343 		ext_list = qdf_nbuf_queue_next(ext_list);
344 	}
345 
346 	qdf_atomic_add(num_nbuf, &nbuf_count);
347 }
348 qdf_export_symbol(__qdf_nbuf_count_inc);
349 
350 /**
351  * __qdf_nbuf_count_dec() - decrement nbuf global count
352  *
353  * @nbuf: network buffer
354  *
355  * Return: void
356  */
357 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
358 {
359 	qdf_nbuf_t ext_list;
360 	int num_nbuf;
361 
362 	if (qdf_nbuf_get_users(nbuf) > 1)
363 		return;
364 
365 	num_nbuf = 1;
366 
367 	/* Take care to account for frag_list */
368 	ext_list = qdf_nbuf_get_ext_list(nbuf);
369 	while (ext_list) {
370 		if (qdf_nbuf_get_users(ext_list) == 1)
371 			++num_nbuf;
372 		ext_list = qdf_nbuf_queue_next(ext_list);
373 	}
374 
375 	qdf_atomic_sub(num_nbuf, &nbuf_count);
376 }
377 qdf_export_symbol(__qdf_nbuf_count_dec);
378 #endif
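
/*
 * Illustrative sketch: with QDF_NBUF_GLOBAL_COUNT, the global counter gives
 * a cheap end-of-run leak check. The check itself is an assumption for
 * illustration, not something this file performs. Hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_leak_check(void)
{
	int outstanding = __qdf_nbuf_count_get();

	if (outstanding)
		qdf_debug("%d nbufs still outstanding", outstanding);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */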
379 
380 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
381 	!defined(QCA_WIFI_QCN9000)
382 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
383 				 int align, int prio, const char *func,
384 				 uint32_t line)
385 {
386 	struct sk_buff *skb;
387 	unsigned long offset;
388 	uint32_t lowmem_alloc_tries = 0;
389 
390 	if (align)
391 		size += (align - 1);
392 
393 realloc:
394 	skb = dev_alloc_skb(size);
395 
396 	if (skb)
397 		goto skb_alloc;
398 
399 	skb = pld_nbuf_pre_alloc(size);
400 
401 	if (!skb) {
402 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
403 				size, func, line);
404 		return NULL;
405 	}
406 
407 skb_alloc:
408 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
409 	 * Though we are trying to reserve low memory upfront to prevent this,
410 	 * we sometimes see SKBs allocated from low memory.
411 	 */
412 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
413 		lowmem_alloc_tries++;
414 		if (lowmem_alloc_tries > 100) {
415 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
416 				     size, func, line);
417 			return NULL;
418 		} else {
419 			/* Deliberately not freeing the low-memory skb
420 			 * so it cannot be handed out again
421 			 */
422 			goto realloc;
423 		}
424 	}
425 	memset(skb->cb, 0x0, sizeof(skb->cb));
426 
427 	/*
428 	 * The default is for netbuf fragments to be interpreted
429 	 * as wordstreams rather than bytestreams.
430 	 */
431 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
432 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
433 
434 	/*
435 	 * XXX: how about we reserve first, then align?
436 	 * Align & make sure that the tail & data are adjusted properly
437 	 */
438 
439 	if (align) {
440 		offset = ((unsigned long)skb->data) % align;
441 		if (offset)
442 			skb_reserve(skb, align - offset);
443 	}
444 
445 	/*
446 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
447 	 * pointer
448 	 */
449 	skb_reserve(skb, reserve);
450 	qdf_nbuf_count_inc(skb);
451 
452 	return skb;
453 }
454 #else
455 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
456 				 int align, int prio, const char *func,
457 				 uint32_t line)
458 {
459 	struct sk_buff *skb;
460 	unsigned long offset;
461 	int flags = GFP_KERNEL;
462 
463 	if (align)
464 		size += (align - 1);
465 
466 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
467 		flags = GFP_ATOMIC;
468 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
469 		/*
470 		 * Observed that kcompactd burns out CPU to make order-3 pages.
471 		 * __netdev_alloc_skb has a 4k page fallback option just in case
472 		 * high order page allocation fails, so we don't need to push
473 		 * hard here. Let kcompactd rest in peace.
474 		 */
475 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
476 #endif
477 	}
478 
479 	skb = __netdev_alloc_skb(NULL, size, flags);
480 
481 	if (skb)
482 		goto skb_alloc;
483 
484 	skb = pld_nbuf_pre_alloc(size);
485 
486 	if (!skb) {
487 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
488 				size, func, line);
489 		__qdf_nbuf_start_replenish_timer();
490 		return NULL;
491 	} else {
492 		__qdf_nbuf_stop_replenish_timer();
493 	}
494 
495 skb_alloc:
496 	memset(skb->cb, 0x0, sizeof(skb->cb));
497 
498 	/*
499 	 * The default is for netbuf fragments to be interpreted
500 	 * as wordstreams rather than bytestreams.
501 	 */
502 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
503 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
504 
505 	/*
506 	 * XXX: how about we reserve first, then align?
507 	 * Align & make sure that the tail & data are adjusted properly
508 	 */
509 
510 	if (align) {
511 		offset = ((unsigned long)skb->data) % align;
512 		if (offset)
513 			skb_reserve(skb, align - offset);
514 	}
515 
516 	/*
517 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
518 	 * pointer
519 	 */
520 	skb_reserve(skb, reserve);
521 	qdf_nbuf_count_inc(skb);
522 
523 	return skb;
524 }
525 #endif
526 qdf_export_symbol(__qdf_nbuf_alloc);
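
/*
 * Illustrative sketch of a typical allocation: a 2 KB buffer with 64 bytes
 * of reserved headroom and a 4-byte-aligned data pointer. Real callers
 * normally use the qdf_nbuf_alloc() wrapper rather than this __ variant.
 * Hypothetical QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static qdf_nbuf_t qdf_nbuf_example_alloc(qdf_device_t osdev)
{
	return __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0, __func__, __LINE__);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */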
527 
528 /**
529  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
530  * @skb: Pointer to network buffer
531  *
532  * Return: none
533  */
534 
535 void __qdf_nbuf_free(struct sk_buff *skb)
536 {
537 	if (pld_nbuf_pre_alloc_free(skb))
538 		return;
539 
540 	qdf_nbuf_count_dec(skb);
541 	if (nbuf_free_cb)
542 		nbuf_free_cb(skb);
543 	else
544 		dev_kfree_skb_any(skb);
545 }
546 
547 qdf_export_symbol(__qdf_nbuf_free);
548 
549 #ifdef NBUF_MEMORY_DEBUG
550 enum qdf_nbuf_event_type {
551 	QDF_NBUF_ALLOC,
552 	QDF_NBUF_ALLOC_CLONE,
553 	QDF_NBUF_ALLOC_COPY,
554 	QDF_NBUF_ALLOC_FAILURE,
555 	QDF_NBUF_FREE,
556 	QDF_NBUF_MAP,
557 	QDF_NBUF_UNMAP,
558 };
559 
560 struct qdf_nbuf_event {
561 	qdf_nbuf_t nbuf;
562 	char func[QDF_MEM_FUNC_NAME_SIZE];
563 	uint32_t line;
564 	enum qdf_nbuf_event_type type;
565 	uint64_t timestamp;
566 };
567 
568 #define QDF_NBUF_HISTORY_SIZE 4096
569 static qdf_atomic_t qdf_nbuf_history_index;
570 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
571 
572 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
573 {
574 	int32_t next = qdf_atomic_inc_return(index);
575 
576 	if (next == size)
577 		qdf_atomic_sub(size, index);
578 
579 	return next % size;
580 }
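
/*
 * Illustrative sketch of the lock-free wraparound above: the bare increment
 * may momentarily reach 'size', the modulo keeps the returned slot in range,
 * and the subtraction pulls the counter back so it never overflows. With
 * size = 4 successive calls return 1, 2, 3, 0, 1, ... Hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_circular_index(void)
{
	qdf_atomic_t idx;
	int i;

	qdf_atomic_init(&idx);
	for (i = 0; i < 8; i++)
		(void)qdf_nbuf_circular_index_next(&idx, 4);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */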
581 
582 static void
583 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
584 		     enum qdf_nbuf_event_type type)
585 {
586 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
587 						   QDF_NBUF_HISTORY_SIZE);
588 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
589 
590 	event->nbuf = nbuf;
591 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
592 	event->line = line;
593 	event->type = type;
594 	event->timestamp = qdf_get_log_timestamp();
595 }
596 #endif /* NBUF_MEMORY_DEBUG */
597 
598 #ifdef NBUF_MAP_UNMAP_DEBUG
599 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
600 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
601 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
602 
603 static void qdf_nbuf_map_tracking_init(void)
604 {
605 	qdf_tracker_init(&qdf_nbuf_map_tracker);
606 }
607 
608 static void qdf_nbuf_map_tracking_deinit(void)
609 {
610 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
611 }
612 
613 static QDF_STATUS
614 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
615 {
616 	QDF_STATUS status;
617 
618 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
619 	if (QDF_IS_STATUS_ERROR(status))
620 		return status;
621 
622 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
623 
624 	return QDF_STATUS_SUCCESS;
625 }
626 
627 static void
628 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
629 {
630 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
631 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
632 }
633 
634 void qdf_nbuf_map_check_for_leaks(void)
635 {
636 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
637 }
638 
639 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
640 			      qdf_nbuf_t buf,
641 			      qdf_dma_dir_t dir,
642 			      const char *func,
643 			      uint32_t line)
644 {
645 	QDF_STATUS status;
646 
647 	status = qdf_nbuf_track_map(buf, func, line);
648 	if (QDF_IS_STATUS_ERROR(status))
649 		return status;
650 
651 	status = __qdf_nbuf_map(osdev, buf, dir);
652 	if (QDF_IS_STATUS_ERROR(status))
653 		qdf_nbuf_untrack_map(buf, func, line);
654 
655 	return status;
656 }
657 
658 qdf_export_symbol(qdf_nbuf_map_debug);
659 
660 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
661 			  qdf_nbuf_t buf,
662 			  qdf_dma_dir_t dir,
663 			  const char *func,
664 			  uint32_t line)
665 {
666 	qdf_nbuf_untrack_map(buf, func, line);
667 	__qdf_nbuf_unmap_single(osdev, buf, dir);
668 }
669 
670 qdf_export_symbol(qdf_nbuf_unmap_debug);
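
/*
 * Illustrative sketch: under NBUF_MAP_UNMAP_DEBUG every map must be paired
 * with an unmap of the same nbuf, or qdf_nbuf_map_check_for_leaks() will
 * flag it. Real callers reach these through the qdf_nbuf_map()/
 * qdf_nbuf_unmap() macros. Hypothetical QDF_NBUF_DOC_EXAMPLES guard,
 * never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_tracked_map(qdf_device_t osdev, qdf_nbuf_t buf)
{
	if (QDF_IS_STATUS_ERROR(qdf_nbuf_map_debug(osdev, buf,
						   QDF_DMA_TO_DEVICE,
						   __func__, __LINE__)))
		return;

	/* ... hand the buffer to hardware, wait for completion ... */

	qdf_nbuf_unmap_debug(osdev, buf, QDF_DMA_TO_DEVICE,
			     __func__, __LINE__);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */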
671 
672 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
673 				     qdf_nbuf_t buf,
674 				     qdf_dma_dir_t dir,
675 				     const char *func,
676 				     uint32_t line)
677 {
678 	QDF_STATUS status;
679 
680 	status = qdf_nbuf_track_map(buf, func, line);
681 	if (QDF_IS_STATUS_ERROR(status))
682 		return status;
683 
684 	status = __qdf_nbuf_map_single(osdev, buf, dir);
685 	if (QDF_IS_STATUS_ERROR(status))
686 		qdf_nbuf_untrack_map(buf, func, line);
687 
688 	return status;
689 }
690 
691 qdf_export_symbol(qdf_nbuf_map_single_debug);
692 
693 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
694 				 qdf_nbuf_t buf,
695 				 qdf_dma_dir_t dir,
696 				 const char *func,
697 				 uint32_t line)
698 {
699 	qdf_nbuf_untrack_map(buf, func, line);
700 	__qdf_nbuf_unmap_single(osdev, buf, dir);
701 }
702 
703 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
704 
705 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
706 				     qdf_nbuf_t buf,
707 				     qdf_dma_dir_t dir,
708 				     int nbytes,
709 				     const char *func,
710 				     uint32_t line)
711 {
712 	QDF_STATUS status;
713 
714 	status = qdf_nbuf_track_map(buf, func, line);
715 	if (QDF_IS_STATUS_ERROR(status))
716 		return status;
717 
718 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
719 	if (QDF_IS_STATUS_ERROR(status))
720 		qdf_nbuf_untrack_map(buf, func, line);
721 
722 	return status;
723 }
724 
725 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
726 
727 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
728 				 qdf_nbuf_t buf,
729 				 qdf_dma_dir_t dir,
730 				 int nbytes,
731 				 const char *func,
732 				 uint32_t line)
733 {
734 	qdf_nbuf_untrack_map(buf, func, line);
735 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
736 }
737 
738 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
739 
740 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
741 					    qdf_nbuf_t buf,
742 					    qdf_dma_dir_t dir,
743 					    int nbytes,
744 					    const char *func,
745 					    uint32_t line)
746 {
747 	QDF_STATUS status;
748 
749 	status = qdf_nbuf_track_map(buf, func, line);
750 	if (QDF_IS_STATUS_ERROR(status))
751 		return status;
752 
753 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
754 	if (QDF_IS_STATUS_ERROR(status))
755 		qdf_nbuf_untrack_map(buf, func, line);
756 
757 	return status;
758 }
759 
760 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
761 
762 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
763 					qdf_nbuf_t buf,
764 					qdf_dma_dir_t dir,
765 					int nbytes,
766 					const char *func,
767 					uint32_t line)
768 {
769 	qdf_nbuf_untrack_map(buf, func, line);
770 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
771 }
772 
773 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
774 
775 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
776 					     const char *func,
777 					     uint32_t line)
778 {
779 	char map_func[QDF_TRACKER_FUNC_SIZE];
780 	uint32_t map_line;
781 
782 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
783 				&map_func, &map_line))
784 		return;
785 
786 	QDF_DEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
787 			func, line, map_func, map_line);
788 }
789 #else
790 static inline void qdf_nbuf_map_tracking_init(void)
791 {
792 }
793 
794 static inline void qdf_nbuf_map_tracking_deinit(void)
795 {
796 }
797 
798 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
799 						    const char *func,
800 						    uint32_t line)
801 {
802 }
803 #endif /* NBUF_MAP_UNMAP_DEBUG */
804 
805 /**
806  * __qdf_nbuf_map() - map a buffer to local bus address space
807  * @osdev: OS device
808  * @skb: Pointer to network buffer
809  * @dir: DMA direction
810  * Assumes the skb has a single fragment.
811  *
812  * Return: QDF_STATUS
813  */
814 #ifdef QDF_OS_DEBUG
815 QDF_STATUS
816 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
817 {
818 	struct skb_shared_info *sh = skb_shinfo(skb);
819 
820 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
821 			|| (dir == QDF_DMA_FROM_DEVICE));
822 
823 	/*
824 	 * Assume there's only a single fragment.
825 	 * To support multiple fragments, it would be necessary to change
826 	 * qdf_nbuf_t to be a separate object that stores meta-info
827 	 * (including the bus address for each fragment) and a pointer
828 	 * to the underlying sk_buff.
829 	 */
830 	qdf_assert(sh->nr_frags == 0);
831 
832 	return __qdf_nbuf_map_single(osdev, skb, dir);
833 }
834 qdf_export_symbol(__qdf_nbuf_map);
835 
836 #else
837 QDF_STATUS
838 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
839 {
840 	return __qdf_nbuf_map_single(osdev, skb, dir);
841 }
842 qdf_export_symbol(__qdf_nbuf_map);
843 #endif
844 /**
845  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
846  * @osdev: OS device
847  * @skb: Pointer to network buffer
848  * @dir: dma direction
849  *
850  * Return: none
851  */
852 void
853 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
854 			qdf_dma_dir_t dir)
855 {
856 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
857 		   || (dir == QDF_DMA_FROM_DEVICE));
858 
859 	/*
860 	 * Assume there's a single fragment.
861 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
862 	 */
863 	__qdf_nbuf_unmap_single(osdev, skb, dir);
864 }
865 qdf_export_symbol(__qdf_nbuf_unmap);
866 
867 /**
868  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
869  * @osdev: OS device
870  * @skb: Pointer to network buffer
871  * @dir: Direction
872  *
873  * Return: QDF_STATUS
874  */
875 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
876 QDF_STATUS
877 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
878 {
879 	qdf_dma_addr_t paddr;
880 
881 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
882 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
883 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
884 	return QDF_STATUS_SUCCESS;
885 }
886 qdf_export_symbol(__qdf_nbuf_map_single);
887 #else
888 QDF_STATUS
889 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
890 {
891 	qdf_dma_addr_t paddr;
892 
893 	/* assume that the OS only provides a single fragment */
894 	QDF_NBUF_CB_PADDR(buf) = paddr =
895 		dma_map_single(osdev->dev, buf->data,
896 				skb_end_pointer(buf) - buf->data,
897 				__qdf_dma_dir_to_os(dir));
898 	return dma_mapping_error(osdev->dev, paddr)
899 		? QDF_STATUS_E_FAILURE
900 		: QDF_STATUS_SUCCESS;
901 }
902 qdf_export_symbol(__qdf_nbuf_map_single);
903 #endif
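
/*
 * Illustrative sketch: on success the bus address is cached in the skb cb
 * and read back with QDF_NBUF_CB_PADDR(). Hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static QDF_STATUS qdf_nbuf_example_map_single(qdf_device_t osdev,
					      qdf_nbuf_t buf,
					      qdf_dma_addr_t *paddr)
{
	QDF_STATUS status = __qdf_nbuf_map_single(osdev, buf,
						  QDF_DMA_TO_DEVICE);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	*paddr = QDF_NBUF_CB_PADDR(buf);
	return QDF_STATUS_SUCCESS;
}
#endif /* QDF_NBUF_DOC_EXAMPLES */
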
904 /**
905  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
906  * @osdev: OS device
907  * @skb: Pointer to network buffer
908  * @dir: Direction
909  *
910  * Return: none
911  */
912 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
913 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
914 				qdf_dma_dir_t dir)
915 {
916 }
917 #else
918 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
919 					qdf_dma_dir_t dir)
920 {
921 	if (QDF_NBUF_CB_PADDR(buf))
922 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
923 			skb_end_pointer(buf) - buf->data,
924 			__qdf_dma_dir_to_os(dir));
925 }
926 #endif
927 qdf_export_symbol(__qdf_nbuf_unmap_single);
928 
929 /**
930  * __qdf_nbuf_set_rx_cksum() - set rx checksum
931  * @skb: Pointer to network buffer
932  * @cksum: Pointer to checksum value
933  *
934  * Return: QDF_STATUS
935  */
936 QDF_STATUS
937 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
938 {
939 	switch (cksum->l4_result) {
940 	case QDF_NBUF_RX_CKSUM_NONE:
941 		skb->ip_summed = CHECKSUM_NONE;
942 		break;
943 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
944 		skb->ip_summed = CHECKSUM_UNNECESSARY;
945 		break;
946 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
947 		skb->ip_summed = CHECKSUM_PARTIAL;
948 		skb->csum = cksum->val;
949 		break;
950 	default:
951 		pr_err("Unknown checksum type\n");
952 		qdf_assert(0);
953 		return QDF_STATUS_E_NOSUPPORT;
954 	}
955 	return QDF_STATUS_SUCCESS;
956 }
957 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
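
/*
 * Illustrative sketch of an RX path reporting a hardware-verified TCP/UDP
 * checksum to the stack through this helper. Hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static void qdf_nbuf_example_rx_cksum(struct sk_buff *skb, bool hw_verified)
{
	qdf_nbuf_rx_cksum_t cksum = {0};

	cksum.l4_result = hw_verified ?
		QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY : QDF_NBUF_RX_CKSUM_NONE;
	(void)__qdf_nbuf_set_rx_cksum(skb, &cksum);
}
#endif /* QDF_NBUF_DOC_EXAMPLES */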
958 
959 /**
960  * __qdf_nbuf_get_tx_cksum() - get tx checksum
961  * @skb: Pointer to network buffer
962  *
963  * Return: TX checksum value
964  */
965 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
966 {
967 	switch (skb->ip_summed) {
968 	case CHECKSUM_NONE:
969 		return QDF_NBUF_TX_CKSUM_NONE;
970 	case CHECKSUM_PARTIAL:
971 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
972 	case CHECKSUM_COMPLETE:
973 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
974 	default:
975 		return QDF_NBUF_TX_CKSUM_NONE;
976 	}
977 }
978 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
979 
980 /**
981  * __qdf_nbuf_get_tid() - get tid
982  * @skb: Pointer to network buffer
983  *
984  * Return: tid
985  */
986 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
987 {
988 	return skb->priority;
989 }
990 qdf_export_symbol(__qdf_nbuf_get_tid);
991 
992 /**
993  * __qdf_nbuf_set_tid() - set tid
994  * @skb: Pointer to network buffer
995  * @tid: TID value to set
996  * Return: none
997  */
998 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
999 {
1000 	skb->priority = tid;
1001 }
1002 qdf_export_symbol(__qdf_nbuf_set_tid);
1003 
1004 /**
1005  * __qdf_nbuf_get_exemption_type() - get exemption type
1006  * @skb: Pointer to network buffer
1007  *
1008  * Return: exemption type
1009  */
1010 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1011 {
1012 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1013 }
1014 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1015 
1016 /**
1017  * __qdf_nbuf_reg_trace_cb() - register trace callback
1018  * @cb_func_ptr: Pointer to trace callback function
1019  *
1020  * Return: none
1021  */
1022 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1023 {
1024 	qdf_trace_update_cb = cb_func_ptr;
1025 }
1026 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1027 
1028 /**
1029  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1030  *              of DHCP packet.
1031  * @data: Pointer to DHCP packet data buffer
1032  *
1033  * This func. returns the subtype of DHCP packet.
1034  *
1035  * Return: subtype of the DHCP packet.
1036  */
1037 enum qdf_proto_subtype
1038 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1039 {
1040 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1041 
1042 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1043 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1044 					QDF_DHCP_OPTION53_LENGTH)) {
1045 
1046 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1047 		case QDF_DHCP_DISCOVER:
1048 			subtype = QDF_PROTO_DHCP_DISCOVER;
1049 			break;
1050 		case QDF_DHCP_REQUEST:
1051 			subtype = QDF_PROTO_DHCP_REQUEST;
1052 			break;
1053 		case QDF_DHCP_OFFER:
1054 			subtype = QDF_PROTO_DHCP_OFFER;
1055 			break;
1056 		case QDF_DHCP_ACK:
1057 			subtype = QDF_PROTO_DHCP_ACK;
1058 			break;
1059 		case QDF_DHCP_NAK:
1060 			subtype = QDF_PROTO_DHCP_NACK;
1061 			break;
1062 		case QDF_DHCP_RELEASE:
1063 			subtype = QDF_PROTO_DHCP_RELEASE;
1064 			break;
1065 		case QDF_DHCP_INFORM:
1066 			subtype = QDF_PROTO_DHCP_INFORM;
1067 			break;
1068 		case QDF_DHCP_DECLINE:
1069 			subtype = QDF_PROTO_DHCP_DECLINE;
1070 			break;
1071 		default:
1072 			break;
1073 		}
1074 	}
1075 
1076 	return subtype;
1077 }
1078 
1079 /**
1080  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1081  *            of EAPOL packet.
1082  * @data: Pointer to EAPOL packet data buffer
1083  *
1084  * This func. returns the subtype of EAPOL packet.
1085  *
1086  * Return: subtype of the EAPOL packet.
1087  */
1088 enum qdf_proto_subtype
1089 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1090 {
1091 	uint16_t eapol_key_info;
1092 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1093 	uint16_t mask;
1094 
1095 	eapol_key_info = (uint16_t)(*(uint16_t *)
1096 			(data + EAPOL_KEY_INFO_OFFSET));
1097 
1098 	mask = eapol_key_info & EAPOL_MASK;
1099 	switch (mask) {
1100 	case EAPOL_M1_BIT_MASK:
1101 		subtype = QDF_PROTO_EAPOL_M1;
1102 		break;
1103 	case EAPOL_M2_BIT_MASK:
1104 		subtype = QDF_PROTO_EAPOL_M2;
1105 		break;
1106 	case EAPOL_M3_BIT_MASK:
1107 		subtype = QDF_PROTO_EAPOL_M3;
1108 		break;
1109 	case EAPOL_M4_BIT_MASK:
1110 		subtype = QDF_PROTO_EAPOL_M4;
1111 		break;
1112 	default:
1113 		break;
1114 	}
1115 
1116 	return subtype;
1117 }
1118 
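/*
 * Illustrative sketch: the subtype helpers above and below are normally
 * driven from a classifier that inspects the frame once; the predicates
 * used here are defined later in this file and declared in the nbuf
 * headers. Hypothetical QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static enum qdf_proto_subtype qdf_nbuf_example_classify(qdf_nbuf_t nbuf)
{
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (__qdf_nbuf_data_is_ipv4_eapol_pkt(data))
		return __qdf_nbuf_data_get_eapol_subtype(data);
	if (__qdf_nbuf_data_is_ipv4_pkt(data) &&
	    __qdf_nbuf_data_is_ipv4_dhcp_pkt(data))
		return __qdf_nbuf_data_get_dhcp_subtype(data);
	return QDF_PROTO_INVALID;
}
#endif /* QDF_NBUF_DOC_EXAMPLES */
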
1119 /**
1120  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1121  *            of ARP packet.
1122  * @data: Pointer to ARP packet data buffer
1123  *
1124  * This func. returns the subtype of ARP packet.
1125  *
1126  * Return: subtype of the ARP packet.
1127  */
1128 enum qdf_proto_subtype
1129 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1130 {
1131 	uint16_t subtype;
1132 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1133 
1134 	subtype = (uint16_t)(*(uint16_t *)
1135 			(data + ARP_SUB_TYPE_OFFSET));
1136 
1137 	switch (QDF_SWAP_U16(subtype)) {
1138 	case ARP_REQUEST:
1139 		proto_subtype = QDF_PROTO_ARP_REQ;
1140 		break;
1141 	case ARP_RESPONSE:
1142 		proto_subtype = QDF_PROTO_ARP_RES;
1143 		break;
1144 	default:
1145 		break;
1146 	}
1147 
1148 	return proto_subtype;
1149 }
1150 
1151 /**
1152  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1153  *            of IPV4 ICMP packet.
1154  * @data: Pointer to IPV4 ICMP packet data buffer
1155  *
1156  * This func. returns the subtype of ICMP packet.
1157  *
1158  * Return: subtype of the ICMP packet.
1159  */
1160 enum qdf_proto_subtype
1161 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1162 {
1163 	uint8_t subtype;
1164 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1165 
1166 	subtype = (uint8_t)(*(uint8_t *)
1167 			(data + ICMP_SUBTYPE_OFFSET));
1168 
1169 	switch (subtype) {
1170 	case ICMP_REQUEST:
1171 		proto_subtype = QDF_PROTO_ICMP_REQ;
1172 		break;
1173 	case ICMP_RESPONSE:
1174 		proto_subtype = QDF_PROTO_ICMP_RES;
1175 		break;
1176 	default:
1177 		break;
1178 	}
1179 
1180 	return proto_subtype;
1181 }
1182 
1183 /**
1184  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1185  *            of IPV6 ICMPV6 packet.
1186  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1187  *
1188  * This func. returns the subtype of ICMPV6 packet.
1189  *
1190  * Return: subtype of the ICMPV6 packet.
1191  */
1192 enum qdf_proto_subtype
1193 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1194 {
1195 	uint8_t subtype;
1196 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1197 
1198 	subtype = (uint8_t)(*(uint8_t *)
1199 			(data + ICMPV6_SUBTYPE_OFFSET));
1200 
1201 	switch (subtype) {
1202 	case ICMPV6_REQUEST:
1203 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1204 		break;
1205 	case ICMPV6_RESPONSE:
1206 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1207 		break;
1208 	case ICMPV6_RS:
1209 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1210 		break;
1211 	case ICMPV6_RA:
1212 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1213 		break;
1214 	case ICMPV6_NS:
1215 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1216 		break;
1217 	case ICMPV6_NA:
1218 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1219 		break;
1220 	default:
1221 		break;
1222 	}
1223 
1224 	return proto_subtype;
1225 }
1226 
1227 /**
1228  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1229  *            of IPV4 packet.
1230  * @data: Pointer to IPV4 packet data buffer
1231  *
1232  * This func. returns the proto type of IPV4 packet.
1233  *
1234  * Return: proto type of IPV4 packet.
1235  */
1236 uint8_t
1237 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1238 {
1239 	uint8_t proto_type;
1240 
1241 	proto_type = (uint8_t)(*(uint8_t *)(data +
1242 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1243 	return proto_type;
1244 }
1245 
1246 /**
1247  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1248  *            of IPV6 packet.
1249  * @data: Pointer to IPV6 packet data buffer
1250  *
1251  * This func. returns the proto type of IPV6 packet.
1252  *
1253  * Return: proto type of IPV6 packet.
1254  */
1255 uint8_t
1256 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1257 {
1258 	uint8_t proto_type;
1259 
1260 	proto_type = (uint8_t)(*(uint8_t *)(data +
1261 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1262 	return proto_type;
1263 }
1264 
1265 /**
1266  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1267  * @data: Pointer to network data
1268  *
1269  * This api is for Tx packets.
1270  *
1271  * Return: true if packet is ipv4 packet
1272  *	   false otherwise
1273  */
1274 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1275 {
1276 	uint16_t ether_type;
1277 
1278 	ether_type = (uint16_t)(*(uint16_t *)(data +
1279 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1280 
1281 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1282 		return true;
1283 	else
1284 		return false;
1285 }
1286 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1287 
1288 /**
1289  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1290  * @data: Pointer to network data buffer
1291  *
1292  * This api is for ipv4 packet.
1293  *
1294  * Return: true if packet is DHCP packet
1295  *	   false otherwise
1296  */
1297 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1298 {
1299 	uint16_t sport;
1300 	uint16_t dport;
1301 
1302 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1303 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1304 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1305 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1306 					 sizeof(uint16_t)));
1307 
1308 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1309 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1310 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1311 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1312 		return true;
1313 	else
1314 		return false;
1315 }
1316 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1317 
1318 /**
1319  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1320  * @data: Pointer to network data buffer
1321  *
1322  * This api is for ipv4 packet.
1323  *
1324  * Return: true if packet is EAPOL packet
1325  *	   false otherwise.
1326  */
1327 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1328 {
1329 	uint16_t ether_type;
1330 
1331 	ether_type = (uint16_t)(*(uint16_t *)(data +
1332 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1333 
1334 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1335 		return true;
1336 	else
1337 		return false;
1338 }
1339 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1340 
1341 /**
1342  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1343  * @skb: Pointer to network buffer
1344  *
1345  * This api is for ipv4 packet.
1346  *
1347  * Return: true if packet is WAPI packet
1348  *	   false otherwise.
1349  */
1350 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1351 {
1352 	uint16_t ether_type;
1353 
1354 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1355 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1356 
1357 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1358 		return true;
1359 	else
1360 		return false;
1361 }
1362 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1363 
1364 /**
1365  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1366  * @skb: Pointer to network buffer
1367  *
1368  * This api is for ipv4 packet.
1369  *
1370  * Return: true if packet is tdls packet
1371  *	   false otherwise.
1372  */
1373 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1374 {
1375 	uint16_t ether_type;
1376 
1377 	ether_type = *(uint16_t *)(skb->data +
1378 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1379 
1380 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1381 		return true;
1382 	else
1383 		return false;
1384 }
1385 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1386 
1387 /**
1388  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1389  * @data: Pointer to network data buffer
1390  *
1391  * This api is for ipv4 packet.
1392  *
1393  * Return: true if packet is ARP packet
1394  *	   false otherwise.
1395  */
1396 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1397 {
1398 	uint16_t ether_type;
1399 
1400 	ether_type = (uint16_t)(*(uint16_t *)(data +
1401 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1402 
1403 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1404 		return true;
1405 	else
1406 		return false;
1407 }
1408 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1409 
1410 /**
1411  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1412  * @data: Pointer to network data buffer
1413  *
1414  * This api is for ipv4 packet.
1415  *
1416  * Return: true if packet is ARP request
1417  *	   false otherwise.
1418  */
1419 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1420 {
1421 	uint16_t op_code;
1422 
1423 	op_code = (uint16_t)(*(uint16_t *)(data +
1424 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1425 
1426 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1427 		return true;
1428 	return false;
1429 }
1430 
1431 /**
1432  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1433  * @data: Pointer to network data buffer
1434  *
1435  * This api is for ipv4 packet.
1436  *
1437  * Return: true if packet is ARP response
1438  *	   false otherwise.
1439  */
1440 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1441 {
1442 	uint16_t op_code;
1443 
1444 	op_code = (uint16_t)(*(uint16_t *)(data +
1445 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1446 
1447 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1448 		return true;
1449 	return false;
1450 }
1451 
1452 /**
1453  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1454  * @data: Pointer to network data buffer
1455  *
1456  * This api is for ipv4 packet.
1457  *
1458  * Return: ARP packet source IP value.
1459  */
1460 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1461 {
1462 	uint32_t src_ip;
1463 
1464 	src_ip = (uint32_t)(*(uint32_t *)(data +
1465 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1466 
1467 	return src_ip;
1468 }
1469 
1470 /**
1471  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1472  * @data: Pointer to network data buffer
1473  *
1474  * This api is for ipv4 packet.
1475  *
1476  * Return: ARP packet target IP value.
1477  */
1478 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1479 {
1480 	uint32_t tgt_ip;
1481 
1482 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1483 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1484 
1485 	return tgt_ip;
1486 }
1487 
1488 /**
1489  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1490  * @data: Pointer to network data buffer
1491  * @len: length to copy
1492  *
1493  * This api is for dns domain name
1494  *
1495  * Return: dns domain name.
1496  */
1497 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1498 {
1499 	uint8_t *domain_name;
1500 
1501 	domain_name = (uint8_t *)
1502 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1503 	return domain_name;
1504 }
1505 
1506 
1507 /**
1508  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1509  * @data: Pointer to network data buffer
1510  *
1511  * This api is for dns query packet.
1512  *
1513  * Return: true if packet is dns query packet.
1514  *	   false otherwise.
1515  */
1516 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1517 {
1518 	uint16_t op_code;
1519 	uint16_t tgt_port;
1520 
1521 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1522 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1523 	/* Standard DNS query always happens on Dest Port 53. */
1524 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1525 		op_code = (uint16_t)(*(uint16_t *)(data +
1526 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1527 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1528 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1529 			return true;
1530 	}
1531 	return false;
1532 }
1533 
1534 /**
1535  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1536  * @data: Pointer to network data buffer
1537  *
1538  * This api is for dns query response.
1539  *
1540  * Return: true if packet is dns response packet.
1541  *	   false otherwise.
1542  */
1543 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1544 {
1545 	uint16_t op_code;
1546 	uint16_t src_port;
1547 
1548 	src_port = (uint16_t)(*(uint16_t *)(data +
1549 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1550 	/* Standard DNS response always comes on Src Port 53. */
1551 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1552 		op_code = (uint16_t)(*(uint16_t *)(data +
1553 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1554 
1555 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1556 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1557 			return true;
1558 	}
1559 	return false;
1560 }
1561 
1562 /**
1563  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1564  * @data: Pointer to network data buffer
1565  *
1566  * This api is for tcp syn packet.
1567  *
1568  * Return: true if packet is tcp syn packet.
1569  *	   false otherwise.
1570  */
1571 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1572 {
1573 	uint8_t op_code;
1574 
1575 	op_code = (uint8_t)(*(uint8_t *)(data +
1576 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1577 
1578 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1579 		return true;
1580 	return false;
1581 }
1582 
1583 /**
1584  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1585  * @data: Pointer to network data buffer
1586  *
1587  * This api is for tcp syn ack packet.
1588  *
1589  * Return: true if packet is tcp syn ack packet.
1590  *	   false otherwise.
1591  */
1592 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1593 {
1594 	uint8_t op_code;
1595 
1596 	op_code = (uint8_t)(*(uint8_t *)(data +
1597 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1598 
1599 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1600 		return true;
1601 	return false;
1602 }
1603 
1604 /**
1605  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1606  * @data: Pointer to network data buffer
1607  *
1608  * This api is for tcp ack packet.
1609  *
1610  * Return: true if packet is tcp ack packet.
1611  *	   false otherwise.
1612  */
1613 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1614 {
1615 	uint8_t op_code;
1616 
1617 	op_code = (uint8_t)(*(uint8_t *)(data +
1618 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1619 
1620 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1621 		return true;
1622 	return false;
1623 }
1624 
1625 /**
1626  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1627  * @data: Pointer to network data buffer
1628  *
1629  * This api is for tcp packet.
1630  *
1631  * Return: tcp source port value.
1632  */
1633 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1634 {
1635 	uint16_t src_port;
1636 
1637 	src_port = (uint16_t)(*(uint16_t *)(data +
1638 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1639 
1640 	return src_port;
1641 }
1642 
1643 /**
1644  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1645  * @data: Pointer to network data buffer
1646  *
1647  * This api is for tcp packet.
1648  *
1649  * Return: tcp destination port value.
1650  */
1651 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1652 {
1653 	uint16_t tgt_port;
1654 
1655 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1656 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1657 
1658 	return tgt_port;
1659 }
1660 
1661 /**
1662  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1663  * @data: Pointer to network data buffer
1664  *
1665  * This api is for icmpv4 request packets.
1666  *
1667  * Return: true if packet is icmpv4 request
1668  *	   false otherwise.
1669  */
1670 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1671 {
1672 	uint8_t op_code;
1673 
1674 	op_code = (uint8_t)(*(uint8_t *)(data +
1675 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1676 
1677 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1678 		return true;
1679 	return false;
1680 }
1681 
1682 /**
1683  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1684  * @data: Pointer to network data buffer
1685  *
1686  * This api is for icmpv4 response packets.
1687  *
1688  * Return: true if packet is icmpv4 response
1689  *	   false otherwise.
1690  */
1691 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1692 {
1693 	uint8_t op_code;
1694 
1695 	op_code = (uint8_t)(*(uint8_t *)(data +
1696 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1697 
1698 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1699 		return true;
1700 	return false;
1701 }
1702 
1703 /**
1704  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1705  * @data: Pointer to network data buffer
1706  *
1707  * This api is for ipv4 packet.
1708  *
1709  * Return: icmpv4 packet source IP value.
1710  */
1711 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1712 {
1713 	uint32_t src_ip;
1714 
1715 	src_ip = (uint32_t)(*(uint32_t *)(data +
1716 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1717 
1718 	return src_ip;
1719 }
1720 
1721 /**
1722  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1723  * @data: Pointer to network data buffer
1724  *
1725  * This api is for ipv4 packet.
1726  *
1727  * Return: icmpv4 packet target IP value.
1728  */
1729 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1730 {
1731 	uint32_t tgt_ip;
1732 
1733 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1734 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1735 
1736 	return tgt_ip;
1737 }
1738 
1739 
1740 /**
1741  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1742  * @data: Pointer to IPV6 packet data buffer
1743  *
1744  * This func. checks whether it is an IPV6 packet or not.
1745  *
1746  * Return: TRUE if it is an IPV6 packet
1747  *         FALSE if not
1748  */
1749 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1750 {
1751 	uint16_t ether_type;
1752 
1753 	ether_type = (uint16_t)(*(uint16_t *)(data +
1754 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1755 
1756 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1757 		return true;
1758 	else
1759 		return false;
1760 }
1761 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1762 
1763 /**
1764  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1765  * @data: Pointer to network data buffer
1766  *
1767  * This api is for ipv6 packet.
1768  *
1769  * Return: true if packet is DHCP packet
1770  *	   false otherwise
1771  */
1772 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1773 {
1774 	uint16_t sport;
1775 	uint16_t dport;
1776 
1777 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1778 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1779 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1780 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1781 					sizeof(uint16_t));
1782 
1783 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1784 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1785 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1786 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1787 		return true;
1788 	else
1789 		return false;
1790 }
1791 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1792 
1793 /**
1794  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
1795  * @data: Pointer to network data buffer
1796  *
1797  * This api is for ipv6 packet.
1798  *
1799  * Return: true if packet is MDNS packet
1800  *	   false otherwise
1801  */
1802 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
1803 {
1804 	uint16_t sport;
1805 	uint16_t dport;
1806 
1807 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1808 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1809 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1810 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1811 					sizeof(uint16_t));
1812 
1813 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
1814 	    dport == sport)
1815 		return true;
1816 	else
1817 		return false;
1818 }
1819 
1820 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
1821 
1822 /**
1823  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
1824  * @data: Pointer to IPV4 packet data buffer
1825  *
1826  * This func. checks whether it is an IPV4 multicast packet or not.
1827  *
1828  * Return: TRUE if it is an IPV4 multicast packet
1829  *         FALSE if not
1830  */
1831 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1832 {
1833 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1834 		uint32_t *dst_addr =
1835 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1836 
1837 		/*
1838 		 * Check the top nibble of the IPV4 destination address;
1839 		 * 0xE means the address is multicast (224.0.0.0/4).
1840 		 */
1841 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1842 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1843 			return true;
1844 		else
1845 			return false;
1846 	} else
1847 		return false;
1848 }
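
/*
 * Restated without the QDF mask macros (whose values are not visible in
 * this excerpt): an IPv4 address is multicast when its top nibble is 0xE,
 * i.e. it falls in 224.0.0.0/4. Illustrative sketch under a hypothetical
 * QDF_NBUF_DOC_EXAMPLES guard, never defined.
 */
#ifdef QDF_NBUF_DOC_EXAMPLES
static bool qdf_nbuf_example_ipv4_is_mcast(const uint8_t *dst_be)
{
	/* e.g. 239.255.255.250 (SSDP): 0xEF & 0xF0 == 0xE0 -> multicast */
	return (dst_be[0] & 0xF0) == 0xE0;
}
#endif /* QDF_NBUF_DOC_EXAMPLES */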
1849 
1850 /**
1851  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
1852  * @data: Pointer to IPV6 packet data buffer
1853  *
1854  * This func. checks whether it is an IPV6 multicast packet or not.
1855  *
1856  * Return: TRUE if it is an IPV6 multicast packet
1857  *         FALSE if not
1858  */
1859 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1860 {
1861 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1862 		uint16_t *dst_addr;
1863 
1864 		dst_addr = (uint16_t *)
1865 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1866 
1867 		/*
1868 		 * Check the first 16 bits of the destination address; if
1869 		 * they equal 0xFF00 it is an IPV6 mcast packet.
1870 		 */
1871 		if (*dst_addr ==
1872 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1873 			return true;
1874 		else
1875 			return false;
1876 	} else
1877 		return false;
1878 }
1879 
1880 /**
1881  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
1882  * @data: Pointer to IPV4 ICMP packet data buffer
1883  *
1884  * This func. checks whether it is an ICMP packet or not.
1885  *
1886  * Return: TRUE if it is an ICMP packet
1887  *         FALSE if not
1888  */
1889 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1890 {
1891 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1892 		uint8_t pkt_type;
1893 
1894 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1895 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1896 
1897 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1898 			return true;
1899 		else
1900 			return false;
1901 	} else
1902 		return false;
1903 }
1904 
1905 /**
1906  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
1907  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1908  *
1909  * This func. checks whether it is an ICMPV6 packet or not.
1910  *
1911  * Return: TRUE if it is an ICMPV6 packet
1912  *         FALSE if not
1913  */
1914 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1915 {
1916 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1917 		uint8_t pkt_type;
1918 
1919 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1920 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1921 
1922 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1923 			return true;
1924 		else
1925 			return false;
1926 	} else
1927 		return false;
1928 }
1929 
1930 /**
1931  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
1932  * @data: Pointer to IPV4 UDP packet data buffer
1933  *
1934  * This func. checks whether it is an IPV4 UDP packet or not.
1935  *
1936  * Return: TRUE if it is an IPV4 UDP packet
1937  *         FALSE if not
1938  */
1939 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1940 {
1941 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1942 		uint8_t pkt_type;
1943 
1944 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1945 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1946 
1947 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1948 			return true;
1949 		else
1950 			return false;
1951 	} else
1952 		return false;
1953 }
1954 
1955 /**
1956  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is an IPV4 TCP packet.
1957  * @data: Pointer to IPV4 TCP packet data buffer
1958  *
1959  * This func. checks whether it is an IPV4 TCP packet or not.
1960  *
1961  * Return: TRUE if it is an IPV4 TCP packet
1962  *         FALSE if not
1963  */
1964 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1965 {
1966 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1967 		uint8_t pkt_type;
1968 
1969 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1970 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1971 
1972 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1973 			return true;
1974 		else
1975 			return false;
1976 	} else
1977 		return false;
1978 }
1979 
1980 /**
1981  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is an IPV6 UDP packet.
1982  * @data: Pointer to IPV6 UDP packet data buffer
1983  *
1984  * This func. checks whether it is an IPV6 UDP packet or not.
1985  *
1986  * Return: TRUE if it is an IPV6 UDP packet
1987  *         FALSE if not
1988  */
1989 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
1990 {
1991 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1992 		uint8_t pkt_type;
1993 
1994 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1995 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1996 
1997 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1998 			return true;
1999 		else
2000 			return false;
2001 	} else
2002 		return false;
2003 }
2004 
2005 /**
2006  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPV6 TCP packet
2007  * @data: Pointer to IPV6 TCP packet data buffer
2008  *
2009  * This function checks whether it is an IPV6 TCP packet or not.
2010  *
2011  * Return: TRUE if it is an IPV6 TCP packet
2012  *         FALSE if not
2013  */
2014 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2015 {
2016 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2017 		uint8_t pkt_type;
2018 
2019 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2020 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2021 
2022 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2023 			return true;
2024 		else
2025 			return false;
2026 	} else
2027 		return false;
2028 }
2029 
2030 /**
2031  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2032  * @nbuf: sk buff
2033  *
2034  * Return: true if packet is broadcast
2035  *	   false otherwise
2036  */
2037 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2038 {
2039 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2040 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2041 }
2042 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
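
/*
 * Usage sketch (illustrative only, not built into the driver): the
 * classification helpers above all expect a pointer to the start of the
 * ethernet header, typically obtained via qdf_nbuf_data(). The counter
 * arguments and the function name below are hypothetical.
 */
#if 0
static void example_classify_rx(qdf_nbuf_t nbuf, uint32_t *udp, uint32_t *tcp)
{
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (__qdf_nbuf_data_is_ipv4_udp_pkt(data) ||
	    __qdf_nbuf_data_is_ipv6_udp_pkt(data))
		(*udp)++;
	else if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data) ||
		 __qdf_nbuf_data_is_ipv6_tcp_pkt(data))
		(*tcp)++;
}
#endif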
2043 
2044 #ifdef NBUF_MEMORY_DEBUG
2045 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2046 
2047 /**
2048  * struct qdf_nbuf_track_t - Network buffer track structure
2049  *
2050  * @p_next: Pointer to next
2051  * @net_buf: Pointer to network buffer
2052  * @func_name: Function name
2053  * @line_num: Line number
2054  * @size: Size
2055  */
2056 struct qdf_nbuf_track_t {
2057 	struct qdf_nbuf_track_t *p_next;
2058 	qdf_nbuf_t net_buf;
2059 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2060 	uint32_t line_num;
2061 	size_t size;
2062 };
2063 
2064 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2065 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2066 
2067 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2068 static struct kmem_cache *nbuf_tracking_cache;
2069 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2070 static spinlock_t qdf_net_buf_track_free_list_lock;
2071 static uint32_t qdf_net_buf_track_free_list_count;
2072 static uint32_t qdf_net_buf_track_used_list_count;
2073 static uint32_t qdf_net_buf_track_max_used;
2074 static uint32_t qdf_net_buf_track_max_free;
2075 static uint32_t qdf_net_buf_track_max_allocated;
2076 
2077 /**
2078  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2079  *
2080  * tracks the max number of network buffers that the wlan driver was tracking
2081  * at any one time.
2082  *
2083  * Return: none
2084  */
2085 static inline void update_max_used(void)
2086 {
2087 	int sum;
2088 
2089 	if (qdf_net_buf_track_max_used <
2090 	    qdf_net_buf_track_used_list_count)
2091 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2092 	sum = qdf_net_buf_track_free_list_count +
2093 		qdf_net_buf_track_used_list_count;
2094 	if (qdf_net_buf_track_max_allocated < sum)
2095 		qdf_net_buf_track_max_allocated = sum;
2096 }
2097 
2098 /**
2099  * update_max_free() - update qdf_net_buf_track_max_free
2100  *
2101  * tracks the max number of tracking buffers kept in the freelist.
2102  *
2103  * Return: none
2104  */
2105 static inline void update_max_free(void)
2106 {
2107 	if (qdf_net_buf_track_max_free <
2108 	    qdf_net_buf_track_free_list_count)
2109 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2110 }
2111 
2112 /**
2113  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2114  *
2115  * This function pulls from a freelist if possible, otherwise it falls back
2116  * to kmem_cache_alloc. This split also adds flexibility to adjust the
2117  * allocation and freelist schemes.
2118  *
2119  * Return: pointer to an unused QDF_NBUF_TRACK structure, possibly not zeroed
2120  */
2121 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2122 {
2123 	int flags = GFP_KERNEL;
2124 	unsigned long irq_flag;
2125 	QDF_NBUF_TRACK *new_node = NULL;
2126 
2127 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2128 	qdf_net_buf_track_used_list_count++;
2129 	if (qdf_net_buf_track_free_list) {
2130 		new_node = qdf_net_buf_track_free_list;
2131 		qdf_net_buf_track_free_list =
2132 			qdf_net_buf_track_free_list->p_next;
2133 		qdf_net_buf_track_free_list_count--;
2134 	}
2135 	update_max_used();
2136 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2137 
2138 	if (new_node)
2139 		return new_node;
2140 
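	/* GFP_KERNEL may sleep; fall back to GFP_ATOMIC in atomic context */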
2141 	if (in_interrupt() || irqs_disabled() || in_atomic())
2142 		flags = GFP_ATOMIC;
2143 
2144 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2145 }
2146 
2147 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2148 #define FREEQ_POOLSIZE 2048
2149 
2150 /**
2151  * qdf_nbuf_track_free() - free the nbuf tracking cookie
2152  * @node: tracking cookie to free
2153  *
2154  * Matches calls to qdf_nbuf_track_alloc. Either returns the cookie to the
2155  * kernel or to an internal freelist, based on the current freelist size.
2156  *
2157  * Return: none
2158  */
2159 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2160 {
2161 	unsigned long irq_flag;
2162 
2163 	if (!node)
2164 		return;
2165 
2166 	/* Try to shrink the freelist if free_list_count exceeds FREEQ_POOLSIZE;
2167 	 * only shrink the freelist if it is bigger than twice the number of
2168 	 * nbufs in use. If the driver is stalling in a consistently bursty
2169 	 * fashion, this will keep 3/4 of the allocations on the free list
2170 	 * while also allowing the system to recover memory as less frantic
2171 	 * traffic occurs.
2172 	 */
2173 
2174 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2175 
2176 	qdf_net_buf_track_used_list_count--;
2177 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2178 	   (qdf_net_buf_track_free_list_count >
2179 	    qdf_net_buf_track_used_list_count << 1)) {
2180 		kmem_cache_free(nbuf_tracking_cache, node);
2181 	} else {
2182 		node->p_next = qdf_net_buf_track_free_list;
2183 		qdf_net_buf_track_free_list = node;
2184 		qdf_net_buf_track_free_list_count++;
2185 	}
2186 	update_max_free();
2187 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2188 }
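
/*
 * Worked example (illustrative): with 3000 cookies on the freelist and 1000
 * in use, 3000 > FREEQ_POOLSIZE (2048) and 3000 > 2 * 1000, so a freed node
 * goes back to the slab cache instead of being kept on the freelist.
 */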
2189 
2190 /**
2191  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2192  *
2193  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2194  * the freelist first makes it performant for the first iperf udp burst
2195  * as well as steady state.
2196  *
2197  * Return: None
2198  */
2199 static void qdf_nbuf_track_prefill(void)
2200 {
2201 	int i;
2202 	QDF_NBUF_TRACK *node, *head;
2203 
2204 	/* prepopulate the freelist */
2205 	head = NULL;
2206 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2207 		node = qdf_nbuf_track_alloc();
2208 		if (!node)
2209 			continue;
2210 		node->p_next = head;
2211 		head = node;
2212 	}
2213 	while (head) {
2214 		node = head->p_next;
2215 		qdf_nbuf_track_free(head);
2216 		head = node;
2217 	}
2218 
2219 	/* prefilled buffers should not count as used */
2220 	qdf_net_buf_track_max_used = 0;
2221 }
2222 
2223 /**
2224  * qdf_nbuf_track_memory_manager_create() - create manager for nbuf tracking cookies
2225  *
2226  * This initializes the memory manager for the nbuf tracking cookies.  Because
2227  * these cookies are all the same size and only used in this feature, we can
2228  * use a kmem_cache to provide tracking as well as to speed up allocations.
2229  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2230  * features) a freelist is prepopulated here.
2231  *
2232  * Return: None
2233  */
2234 static void qdf_nbuf_track_memory_manager_create(void)
2235 {
2236 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2237 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2238 						sizeof(QDF_NBUF_TRACK),
2239 						0, 0, NULL);
2240 
2241 	qdf_nbuf_track_prefill();
2242 }
2243 
2244 /**
2245  * qdf_nbuf_track_memory_manager_destroy() - destroy manager for nbuf tracking cookies
2246  *
2247  * Empty the freelist and print out usage statistics when it is no longer
2248  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2249  * any nbuf tracking cookies were leaked.
2250  *
2251  * Return: None
2252  */
2253 static void qdf_nbuf_track_memory_manager_destroy(void)
2254 {
2255 	QDF_NBUF_TRACK *node, *tmp;
2256 	unsigned long irq_flag;
2257 
2258 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2259 	node = qdf_net_buf_track_free_list;
2260 
2261 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2262 		qdf_print("%s: unexpectedly large max_used count %d",
2263 			  __func__, qdf_net_buf_track_max_used);
2264 
2265 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2266 		qdf_print("%s: %d unused trackers were allocated",
2267 			  __func__,
2268 			  qdf_net_buf_track_max_allocated -
2269 			  qdf_net_buf_track_max_used);
2270 
2271 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2272 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2273 		qdf_print("%s: check freelist shrinking functionality",
2274 			  __func__);
2275 
2276 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2277 		  "%s: %d residual freelist size",
2278 		  __func__, qdf_net_buf_track_free_list_count);
2279 
2280 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2281 		  "%s: %d max freelist size observed",
2282 		  __func__, qdf_net_buf_track_max_free);
2283 
2284 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2285 		  "%s: %d max buffers used observed",
2286 		  __func__, qdf_net_buf_track_max_used);
2287 
2288 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2289 		  "%s: %d max buffers allocated observed",
2290 		  __func__, qdf_net_buf_track_max_allocated);
2291 
2292 	while (node) {
2293 		tmp = node;
2294 		node = node->p_next;
2295 		kmem_cache_free(nbuf_tracking_cache, tmp);
2296 		qdf_net_buf_track_free_list_count--;
2297 	}
2298 
2299 	if (qdf_net_buf_track_free_list_count != 0)
2300 		qdf_info("%d unfreed tracking memory lost in freelist",
2301 			 qdf_net_buf_track_free_list_count);
2302 
2303 	if (qdf_net_buf_track_used_list_count != 0)
2304 		qdf_info("%d unfreed tracking memory still in use",
2305 			 qdf_net_buf_track_used_list_count);
2306 
2307 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2308 	kmem_cache_destroy(nbuf_tracking_cache);
2309 	qdf_net_buf_track_free_list = NULL;
2310 }
2311 
2312 /**
2313  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2314  *
2315  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2316  * in a hash table, and when the driver is unloaded it reports leaked SKBs.
2317  * WLAN driver modules whose allocated SKBs are freed by the network stack
2318  * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
2319  * not reported as a memory leak.
2320  *
2321  * Return: none
2322  */
2323 void qdf_net_buf_debug_init(void)
2324 {
2325 	uint32_t i;
2326 
2327 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2328 
2329 	qdf_nbuf_map_tracking_init();
2330 	qdf_nbuf_track_memory_manager_create();
2331 
2332 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2333 		gp_qdf_net_buf_track_tbl[i] = NULL;
2334 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2335 	}
2336 }
2337 qdf_export_symbol(qdf_net_buf_debug_init);
2338 
2339 /**
2340  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2341  *
2342  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2343  * As part of exiting the functionality, free the leaked memory and
2344  * cleanup the tracking buffers.
2345  *
2346  * Return: none
2347  */
2348 void qdf_net_buf_debug_exit(void)
2349 {
2350 	uint32_t i;
2351 	uint32_t count = 0;
2352 	unsigned long irq_flag;
2353 	QDF_NBUF_TRACK *p_node;
2354 	QDF_NBUF_TRACK *p_prev;
2355 
2356 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2357 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2358 		p_node = gp_qdf_net_buf_track_tbl[i];
2359 		while (p_node) {
2360 			p_prev = p_node;
2361 			p_node = p_node->p_next;
2362 			count++;
2363 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2364 				 p_prev->func_name, p_prev->line_num,
2365 				 p_prev->size, p_prev->net_buf);
2366 			qdf_nbuf_track_free(p_prev);
2367 		}
2368 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2369 	}
2370 
2371 	qdf_nbuf_track_memory_manager_destroy();
2372 	qdf_nbuf_map_tracking_deinit();
2373 
2374 #ifdef CONFIG_HALT_KMEMLEAK
2375 	if (count) {
2376 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2377 		QDF_BUG(0);
2378 	}
2379 #endif
2380 }
2381 qdf_export_symbol(qdf_net_buf_debug_exit);
2382 
2383 /**
2384  * qdf_net_buf_debug_hash() - hash network buffer pointer
2385  * @net_buf: network buffer to hash
2386  * Return: hash value
2387  */
2388 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2389 {
2390 	uint32_t i;
2391 
2392 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2393 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2394 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2395 
2396 	return i;
2397 }
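
/*
 * Design note (illustrative): the two shifts mix both low and mid pointer
 * bits, so slab-aligned skb addresses (which share their lowest bits) do
 * not all collapse into the same bucket (QDF_NET_BUF_TRACK_MAX_SIZE = 1024).
 */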
2398 
2399 /**
2400  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2401  * @net_buf: network buffer to look up
2402  * Return: If skb is found in hash table then return pointer to network buffer
2403  *	else return %NULL
2404  */
2405 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2406 {
2407 	uint32_t i;
2408 	QDF_NBUF_TRACK *p_node;
2409 
2410 	i = qdf_net_buf_debug_hash(net_buf);
2411 	p_node = gp_qdf_net_buf_track_tbl[i];
2412 
2413 	while (p_node) {
2414 		if (p_node->net_buf == net_buf)
2415 			return p_node;
2416 		p_node = p_node->p_next;
2417 	}
2418 
2419 	return NULL;
2420 }
2421 
2422 /**
2423  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2424  *
2425  * Return: none
2426  */
2427 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2428 				const char *func_name, uint32_t line_num)
2429 {
2430 	uint32_t i;
2431 	unsigned long irq_flag;
2432 	QDF_NBUF_TRACK *p_node;
2433 	QDF_NBUF_TRACK *new_node;
2434 
2435 	new_node = qdf_nbuf_track_alloc();
2436 
2437 	i = qdf_net_buf_debug_hash(net_buf);
2438 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2439 
2440 	p_node = qdf_net_buf_debug_look_up(net_buf);
2441 
2442 	if (p_node) {
2443 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2444 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2445 			  net_buf, func_name, line_num);
2446 		qdf_nbuf_track_free(new_node);
2447 	} else {
2448 		p_node = new_node;
2449 		if (p_node) {
2450 			p_node->net_buf = net_buf;
2451 			qdf_str_lcopy(p_node->func_name, func_name,
2452 				      QDF_MEM_FUNC_NAME_SIZE);
2453 			p_node->line_num = line_num;
2454 			p_node->size = size;
2455 			qdf_mem_skb_inc(size);
2456 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2457 			gp_qdf_net_buf_track_tbl[i] = p_node;
2458 		} else
2459 			qdf_print(
2460 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2461 				  func_name, line_num, size);
2462 	}
2463 
2464 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2465 }
2466 qdf_export_symbol(qdf_net_buf_debug_add_node);
2467 
2468 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2469 				   uint32_t line_num)
2470 {
2471 	uint32_t i;
2472 	unsigned long irq_flag;
2473 	QDF_NBUF_TRACK *p_node;
2474 
2475 	i = qdf_net_buf_debug_hash(net_buf);
2476 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2477 
2478 	p_node = qdf_net_buf_debug_look_up(net_buf);
2479 
2480 	if (p_node) {
2481 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2482 			      QDF_MEM_FUNC_NAME_SIZE);
2483 		p_node->line_num = line_num;
2484 	}
2485 
2486 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2487 }
2488 
2489 qdf_export_symbol(qdf_net_buf_debug_update_node);
2490 
2491 /**
2492  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2493  *
2494  * @net_buf: network buffer to remove from the hash table
2495  */
2496 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2497 {
2498 	uint32_t i;
2499 	QDF_NBUF_TRACK *p_head;
2500 	QDF_NBUF_TRACK *p_node = NULL;
2501 	unsigned long irq_flag;
2502 	QDF_NBUF_TRACK *p_prev;
2503 
2504 	i = qdf_net_buf_debug_hash(net_buf);
2505 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2506 
2507 	p_head = gp_qdf_net_buf_track_tbl[i];
2508 
2509 	/* Unallocated SKB */
2510 	if (!p_head)
2511 		goto done;
2512 
2513 	p_node = p_head;
2514 	/* Found at head of the table */
2515 	if (p_head->net_buf == net_buf) {
2516 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2517 		goto done;
2518 	}
2519 
2520 	/* Search in collision list */
2521 	while (p_node) {
2522 		p_prev = p_node;
2523 		p_node = p_node->p_next;
2524 		if ((p_node) && (p_node->net_buf == net_buf)) {
2525 			p_prev->p_next = p_node->p_next;
2526 			break;
2527 		}
2528 	}
2529 
2530 done:
2531 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2532 
2533 	if (p_node) {
2534 		qdf_mem_skb_dec(p_node->size);
2535 		qdf_nbuf_track_free(p_node);
2536 	} else {
2537 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2538 			  net_buf);
2539 		QDF_BUG(0);
2540 	}
2541 }
2542 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2543 
2544 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2545 				   const char *func_name, uint32_t line_num)
2546 {
2547 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2548 
2549 	while (ext_list) {
2550 		/*
2551 		 * Take care to add a node if it is a jumbo packet connected
2552 		 * using frag_list
2553 		 */
2554 		qdf_nbuf_t next;
2555 
2556 		next = qdf_nbuf_queue_next(ext_list);
2557 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2558 		ext_list = next;
2559 	}
2560 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2561 }
2562 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2563 
2564 /**
2565  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2566  * @net_buf: Network buf holding head segment (single)
2567  *
2568  * WLAN driver modules whose allocated SKBs are freed by the network stack
2569  * are supposed to call this API before returning the SKB to the network
2570  * stack, so that the SKB is not reported as a memory leak.
2571  *
2572  * Return: none
2573  */
2574 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2575 {
2576 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2577 
2578 	while (ext_list) {
2579 		/*
2580 		 * Take care to free the node if it is a jumbo packet
2581 		 * connected using frag_list
2582 		 */
2583 		qdf_nbuf_t next;
2584 
2585 		next = qdf_nbuf_queue_next(ext_list);
2586 
2587 		if (qdf_nbuf_get_users(ext_list) > 1) {
2588 			ext_list = next;
2589 			continue;
2590 		}
2591 
2592 		qdf_net_buf_debug_delete_node(ext_list);
2593 		ext_list = next;
2594 	}
2595 
2596 	if (qdf_nbuf_get_users(net_buf) > 1)
2597 		return;
2598 
2599 	qdf_net_buf_debug_delete_node(net_buf);
2600 }
2601 qdf_export_symbol(qdf_net_buf_debug_release_skb);
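
/*
 * Lifecycle sketch (illustrative only): an SKB handed up to the network
 * stack must first be released from the tracking table, otherwise it is
 * reported as a leak at driver unload. deliver_to_stack() below is a
 * hypothetical placeholder for e.g. netif_rx().
 */
#if 0
static void example_deliver(qdf_nbuf_t nbuf)
{
	qdf_net_buf_debug_release_skb(nbuf);
	deliver_to_stack(nbuf);
}
#endif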
2602 
2603 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2604 				int reserve, int align, int prio,
2605 				const char *func, uint32_t line)
2606 {
2607 	qdf_nbuf_t nbuf;
2608 
2609 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2610 
2611 	/* Store SKB in internal QDF tracking table */
2612 	if (qdf_likely(nbuf)) {
2613 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2614 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2615 	} else {
2616 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2617 	}
2618 
2619 	return nbuf;
2620 }
2621 qdf_export_symbol(qdf_nbuf_alloc_debug);
2622 
2623 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2624 {
2625 	qdf_nbuf_t ext_list;
2626 
2627 	if (qdf_unlikely(!nbuf))
2628 		return;
2629 
2630 	if (qdf_nbuf_get_users(nbuf) > 1)
2631 		goto free_buf;
2632 
2633 	/* Remove SKB from internal QDF tracking table */
2634 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2635 	qdf_net_buf_debug_delete_node(nbuf);
2636 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2637 
2638 	/* Take care to delete the debug entries for frag_list */
2639 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2640 	while (ext_list) {
2641 		if (qdf_nbuf_get_users(ext_list) == 1) {
2642 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2643 			qdf_net_buf_debug_delete_node(ext_list);
2644 		}
2645 
2646 		ext_list = qdf_nbuf_queue_next(ext_list);
2647 	}
2648 
2649 free_buf:
2650 	__qdf_nbuf_free(nbuf);
2651 }
2652 qdf_export_symbol(qdf_nbuf_free_debug);
2653 
2654 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2655 {
2656 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2657 
2658 	if (qdf_unlikely(!cloned_buf))
2659 		return NULL;
2660 
2661 	/* Store SKB in internal QDF tracking table */
2662 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2663 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2664 
2665 	return cloned_buf;
2666 }
2667 qdf_export_symbol(qdf_nbuf_clone_debug);
2668 
2669 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2670 {
2671 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2672 
2673 	if (qdf_unlikely(!copied_buf))
2674 		return NULL;
2675 
2676 	/* Store SKB in internal QDF tracking table */
2677 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2678 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2679 
2680 	return copied_buf;
2681 }
2682 qdf_export_symbol(qdf_nbuf_copy_debug);
2683 
2684 #endif /* NBUF_MEMORY_DEBUG */
2685 
2686 #if defined(FEATURE_TSO)
2687 
2688 /**
2689  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2690  *
2691  * @ethproto: ethernet type of the msdu
2692  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2693  * @l2_len: L2 length for the msdu
2694  * @eit_hdr: pointer to EIT header
2695  * @eit_hdr_len: EIT header length for the msdu
2696  * @eit_hdr_dma_map_addr: dma addr for EIT header
2697  * @tcphdr: pointer to tcp header
2698  * @ipv4_csum_en: ipv4 checksum enable
2699  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2700  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2701  * @ip_id: IP id
2702  * @tcp_seq_num: TCP sequence number
2703  *
2704  * This structure holds the TSO information that is common
2705  * across all the TCP segments of the jumbo packet.
2706  */
2707 struct qdf_tso_cmn_seg_info_t {
2708 	uint16_t ethproto;
2709 	uint16_t ip_tcp_hdr_len;
2710 	uint16_t l2_len;
2711 	uint8_t *eit_hdr;
2712 	uint32_t eit_hdr_len;
2713 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2714 	struct tcphdr *tcphdr;
2715 	uint16_t ipv4_csum_en;
2716 	uint16_t tcp_ipv4_csum_en;
2717 	uint16_t tcp_ipv6_csum_en;
2718 	uint16_t ip_id;
2719 	uint32_t tcp_seq_num;
2720 };
2721 
2722 /**
2723  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2724  * information
2725  * @osdev: qdf device handle
2726  * @skb: skb buffer
2727  * @tso_info: Parameters common to all segments
2728  *
2729  * Get the TSO information that is common across all the TCP
2730  * segments of the jumbo packet
2731  *
2732  * Return: 0 - success 1 - failure
2733  */
2734 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2735 			struct sk_buff *skb,
2736 			struct qdf_tso_cmn_seg_info_t *tso_info)
2737 {
2738 	/* Get ethernet type and ethernet header length */
2739 	tso_info->ethproto = vlan_get_protocol(skb);
2740 
2741 	/* Determine whether this is an IPv4 or IPv6 packet */
2742 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2743 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2744 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2745 
2746 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2747 		tso_info->ipv4_csum_en = 1;
2748 		tso_info->tcp_ipv4_csum_en = 1;
2749 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2750 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2751 				ipv4_hdr->protocol);
2752 			return 1;
2753 		}
2754 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2755 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2756 		tso_info->tcp_ipv6_csum_en = 1;
2757 	} else {
2758 		qdf_err("TSO: ethertype 0x%x is not supported!",
2759 			tso_info->ethproto);
2760 		return 1;
2761 	}
2762 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2763 	tso_info->tcphdr = tcp_hdr(skb);
2764 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2765 	/* get pointer to the ethernet + IP + TCP header and their length */
2766 	tso_info->eit_hdr = skb->data;
2767 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2768 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2769 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2770 							tso_info->eit_hdr,
2771 							tso_info->eit_hdr_len,
2772 							DMA_TO_DEVICE);
2773 	if (unlikely(dma_mapping_error(osdev->dev,
2774 				       tso_info->eit_hdr_dma_map_addr))) {
2775 		qdf_err("DMA mapping error!");
2776 		qdf_assert(0);
2777 		return 1;
2778 	}
2779 
2780 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2781 		/* include IPv4 header length for IPV4 (total length) */
2782 		tso_info->ip_tcp_hdr_len =
2783 			tso_info->eit_hdr_len - tso_info->l2_len;
2784 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2785 		/* exclude IPv6 header length for IPv6 (payload length) */
2786 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2787 	}
2788 	/*
2789 	 * The length of the payload (application layer data) is added to
2790 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2791 	 * descriptor.
2792 	 */
2793 
2794 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2795 		tso_info->tcp_seq_num,
2796 		tso_info->eit_hdr_len,
2797 		tso_info->l2_len,
2798 		skb->len);
2799 	return 0;
2800 }
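
/*
 * Worked example (illustrative): for a plain ethernet + IPv4 + TCP frame
 * with no options, skb_transport_header() - skb_mac_header() = 14 + 20 = 34
 * and tcp_hdrlen() = 20, so eit_hdr_len = 54 and ip_tcp_hdr_len = 40.
 */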
2801 
2802 
2803 /**
2804  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2805  *
2806  * @curr_seg: Segment whose contents are initialized
2807  * @tso_cmn_info: Parameters common to all segments
2808  *
2809  * Return: None
2810  */
2811 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2812 				struct qdf_tso_seg_elem_t *curr_seg,
2813 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2814 {
2815 	/* Initialize the flags to 0 */
2816 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2817 
2818 	/*
2819 	 * The following fields remain the same across all segments of
2820 	 * a jumbo packet
2821 	 */
2822 	curr_seg->seg.tso_flags.tso_enable = 1;
2823 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2824 		tso_cmn_info->ipv4_csum_en;
2825 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2826 		tso_cmn_info->tcp_ipv6_csum_en;
2827 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2828 		tso_cmn_info->tcp_ipv4_csum_en;
2829 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2830 
2831 	/* The following fields change for the segments */
2832 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2833 	tso_cmn_info->ip_id++;
2834 
2835 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2836 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2837 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2838 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2839 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2840 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2841 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2842 
2843 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2844 
2845 	/*
2846 	 * First fragment for each segment always contains the ethernet,
2847 	 * IP and TCP header
2848 	 */
2849 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2850 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2851 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2852 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2853 
2854 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2855 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2856 		   tso_cmn_info->eit_hdr_len,
2857 		   curr_seg->seg.tso_flags.tcp_seq_num,
2858 		   curr_seg->seg.total_len);
2859 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2860 }
2861 
2862 /**
2863  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2864  * into segments
2865  * @osdev: qdf device handle
2866  * @skb: network buffer to be segmented
2867  * @tso_info: output; information about the TSO segments is populated here
2868  *
2869  * This function fragments a TCP jumbo packet into smaller
2870  * segments to be transmitted by the driver. It chains the TSO
2871  * segments created into a list.
2872  *
2873  * Return: number of TSO segments
2874  */
2875 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2876 		struct qdf_tso_info_t *tso_info)
2877 {
2878 	/* common across all segments */
2879 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2880 	/* segment specific */
2881 	void *tso_frag_vaddr;
2882 	qdf_dma_addr_t tso_frag_paddr = 0;
2883 	uint32_t num_seg = 0;
2884 	struct qdf_tso_seg_elem_t *curr_seg;
2885 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2886 	skb_frag_t *frag = NULL;
2887 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2888 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2889 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2890 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2891 	int j = 0; /* skb fragment index */
2892 
2893 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2894 	total_num_seg = tso_info->tso_num_seg_list;
2895 	curr_seg = tso_info->tso_seg_list;
2896 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2897 
2898 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2899 						skb, &tso_cmn_info))) {
2900 		qdf_warn("TSO: error getting common segment info");
2901 		return 0;
2902 	}
2903 
2904 	/* length of the first chunk of data in the skb */
2905 	skb_frag_len = skb_headlen(skb);
2906 
2907 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2908 	/* update the remaining skb fragment length and TSO segment length */
2909 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2910 	skb_proc -= tso_cmn_info.eit_hdr_len;
2911 
2912 	/* get the address to the next tso fragment */
2913 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2914 	/* get the length of the next tso fragment */
2915 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2916 
2917 	if (tso_frag_len != 0) {
2918 		tso_frag_paddr = dma_map_single(osdev->dev,
2919 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2920 	}
2921 
2922 	if (unlikely(dma_mapping_error(osdev->dev,
2923 					tso_frag_paddr))) {
2924 		qdf_err("DMA mapping error!");
2925 		qdf_assert(0);
2926 		return 0;
2927 	}
2928 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2929 		__LINE__, skb_frag_len, tso_frag_len);
2930 	num_seg = tso_info->num_segs;
2931 	tso_info->num_segs = 0;
2932 	tso_info->is_tso = 1;
2933 
2934 	while (num_seg && curr_seg) {
2935 		int i = 1; /* tso fragment index */
2936 		uint8_t more_tso_frags = 1;
2937 
2938 		curr_seg->seg.num_frags = 0;
2939 		tso_info->num_segs++;
2940 		total_num_seg->num_seg.tso_cmn_num_seg++;
2941 
2942 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2943 						 &tso_cmn_info);
2944 
2945 		if (unlikely(skb_proc == 0))
2946 			return tso_info->num_segs;
2947 
2948 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2949 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2950 		/* frag len is added to ip_len in the while loop below */
2951 
2952 		curr_seg->seg.num_frags++;
2953 
2954 		while (more_tso_frags) {
2955 			if (tso_frag_len != 0) {
2956 				curr_seg->seg.tso_frags[i].vaddr =
2957 					tso_frag_vaddr;
2958 				curr_seg->seg.tso_frags[i].length =
2959 					tso_frag_len;
2960 				curr_seg->seg.total_len += tso_frag_len;
2961 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2962 				curr_seg->seg.num_frags++;
2963 				skb_proc = skb_proc - tso_frag_len;
2964 
2965 				/* increment the TCP sequence number */
2966 
2967 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2968 				curr_seg->seg.tso_frags[i].paddr =
2969 					tso_frag_paddr;
2970 			}
2971 
2972 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2973 					__func__, __LINE__,
2974 					i,
2975 					tso_frag_len,
2976 					curr_seg->seg.total_len,
2977 					curr_seg->seg.tso_frags[i].vaddr);
2978 
2979 			/* if there is no more data left in the skb */
2980 			if (!skb_proc)
2981 				return tso_info->num_segs;
2982 
2983 			/* get the next payload fragment information */
2984 			/* check if there are more fragments in this segment */
2985 			if (tso_frag_len < tso_seg_size) {
2986 				more_tso_frags = 1;
2987 				if (tso_frag_len != 0) {
2988 					tso_seg_size = tso_seg_size -
2989 						tso_frag_len;
2990 					i++;
2991 					if (curr_seg->seg.num_frags ==
2992 								FRAG_NUM_MAX) {
2993 						more_tso_frags = 0;
2994 						/*
2995 						 * reset i and the tso
2996 						 * payload size
2997 						 */
2998 						i = 1;
2999 						tso_seg_size =
3000 							skb_shinfo(skb)->
3001 								gso_size;
3002 					}
3003 				}
3004 			} else {
3005 				more_tso_frags = 0;
3006 				/* reset i and the tso payload size */
3007 				i = 1;
3008 				tso_seg_size = skb_shinfo(skb)->gso_size;
3009 			}
3010 
3011 			/* if the next fragment is contiguous */
3012 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3013 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3014 				skb_frag_len = skb_frag_len - tso_frag_len;
3015 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3016 
3017 			} else { /* the next fragment is not contiguous */
3018 				if (skb_shinfo(skb)->nr_frags == 0) {
3019 					qdf_info("TSO: nr_frags == 0!");
3020 					qdf_assert(0);
3021 					return 0;
3022 				}
3023 				if (j >= skb_shinfo(skb)->nr_frags) {
3024 					qdf_info("TSO: nr_frags %d j %d",
3025 						 skb_shinfo(skb)->nr_frags, j);
3026 					qdf_assert(0);
3027 					return 0;
3028 				}
3029 				frag = &skb_shinfo(skb)->frags[j];
3030 				skb_frag_len = skb_frag_size(frag);
3031 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3032 				tso_frag_vaddr = skb_frag_address_safe(frag);
3033 				j++;
3034 			}
3035 
3036 			TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d tso_seg_size %d\n",
3037 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3038 				tso_seg_size);
3039 
3040 			if (!(tso_frag_vaddr)) {
3041 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3042 						__func__);
3043 				return 0;
3044 			}
3045 
3046 			tso_frag_paddr =
3047 					 dma_map_single(osdev->dev,
3048 						 tso_frag_vaddr,
3049 						 tso_frag_len,
3050 						 DMA_TO_DEVICE);
3051 			if (unlikely(dma_mapping_error(osdev->dev,
3052 							tso_frag_paddr))) {
3053 				qdf_err("DMA mapping error!");
3054 				qdf_assert(0);
3055 				return 0;
3056 			}
3057 		}
3058 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3059 				curr_seg->seg.tso_flags.tcp_seq_num);
3060 		num_seg--;
3061 		/* if TCP FIN flag was set, set it in the last segment */
3062 		if (!num_seg)
3063 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3064 
3065 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3066 		curr_seg = curr_seg->next;
3067 	}
3068 	return tso_info->num_segs;
3069 }
3070 qdf_export_symbol(__qdf_nbuf_get_tso_info);
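
/*
 * Worked example (illustrative): a linear 4434-byte skb with a 54-byte EIT
 * header and gso_size 1460 carries 4380 payload bytes; the loop above then
 * emits three segments of 1460 bytes each, where every segment's frag[0]
 * points at the shared EIT header and frag[1] at that segment's payload.
 */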
3071 
3072 /**
3073  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3074  *
3075  * @osdev: qdf device handle
3076  * @tso_seg: TSO segment element to be unmapped
3077  * @is_last_seg: whether this is last tso seg or not
3078  *
3079  * Return: none
3080  */
3081 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3082 			  struct qdf_tso_seg_elem_t *tso_seg,
3083 			  bool is_last_seg)
3084 {
3085 	uint32_t num_frags = 0;
3086 
3087 	if (tso_seg->seg.num_frags > 0)
3088 		num_frags = tso_seg->seg.num_frags - 1;
3089 
3090 	/* Num of frags in a tso seg cannot be less than 2 */
3091 	if (num_frags < 1) {
3092 		/*
3093 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3094 		 * this may happen when qdf_nbuf_get_tso_info failed,
3095 		 * do dma unmap for the 0th frag in this seg.
3096 		 */
3097 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3098 			goto last_seg_free_first_frag;
3099 
3100 		qdf_assert(0);
3101 		qdf_err("ERROR: num of frags in a tso segment is %d",
3102 			(num_frags + 1));
3103 		return;
3104 	}
3105 
3106 	while (num_frags) {
3107 		/* Do dma unmap for the tso seg, except the 0th frag */
3108 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3109 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3110 				num_frags);
3111 			qdf_assert(0);
3112 			return;
3113 		}
3114 		dma_unmap_single(osdev->dev,
3115 				 tso_seg->seg.tso_frags[num_frags].paddr,
3116 				 tso_seg->seg.tso_frags[num_frags].length,
3117 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3118 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3119 		num_frags--;
3120 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3121 	}
3122 
3123 last_seg_free_first_frag:
3124 	if (is_last_seg) {
3125 		/* Do dma unmap for the tso seg's 0th frag */
3126 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3127 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3128 			qdf_assert(0);
3129 			return;
3130 		}
3131 		dma_unmap_single(osdev->dev,
3132 				 tso_seg->seg.tso_frags[0].paddr,
3133 				 tso_seg->seg.tso_frags[0].length,
3134 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3135 		tso_seg->seg.tso_frags[0].paddr = 0;
3136 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3137 	}
3138 }
3139 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
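
/*
 * Note (illustrative): frag 0 of every segment holds the shared EIT header,
 * so it is unmapped only once, together with the last segment
 * (is_last_seg == true); frags 1..num_frags-1 are unmapped per segment.
 */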
3140 
3141 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3142 {
3143 	size_t packet_len;
3144 
3145 	packet_len = skb->len -
3146 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3147 		 tcp_hdrlen(skb));
3148 
3149 	return packet_len;
3150 }
3151 
3152 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
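
/*
 * Worked example (illustrative): for a 1514-byte frame with a 14-byte
 * ethernet header, a 20-byte IPv4 header and a 20-byte TCP header, the
 * header span is 34 + 20 = 54 bytes, giving a TCP payload of 1460 bytes.
 */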
3153 
3154 /**
3155  * __qdf_nbuf_get_tso_num_seg() - calculate the number of TSO segments
3156  * required for a TSO nbuf
3157  * @skb: network buffer whose segment count is computed
3158  *
3159  * This function counts how many TSO segments the given TCP jumbo packet
3160  * will be split into, without performing the segmentation itself. It
3161  * accounts for the EIT header carried in every segment and for the
3162  * per-segment fragment limit (FRAG_NUM_MAX).
3163  *
3164  * Return: number of TSO segments on success,
3165  *         0 on failure
3166  */
3167 #ifndef BUILD_X86
3168 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3169 {
3170 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3171 	uint32_t remainder, num_segs = 0;
3172 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3173 	uint8_t frags_per_tso = 0;
3174 	uint32_t skb_frag_len = 0;
3175 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3176 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3177 	skb_frag_t *frag = NULL;
3178 	int j = 0;
3179 	uint32_t temp_num_seg = 0;
3180 
3181 	/* length of the first chunk of data in the skb minus eit header*/
3182 	/* length of the first chunk of data in the skb minus the eit header */
3183 
3184 	/* Calculate num of segs for skb's first chunk of data*/
3185 	/* Calculate num of segs for skb's first chunk of data */
3186 	num_segs = skb_frag_len / tso_seg_size;
3187 	/*
3188 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3189 	 * In that case, one more tso seg is required to accommodate the
3190 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3191 	 * the remaining data will be accommodated while doing the calculation
3192 	 * for nr_frags data. Hence, frags_per_tso++.
3193 	 */
3194 	if (remainder) {
3195 		if (!skb_nr_frags)
3196 			num_segs++;
3197 		else
3198 			frags_per_tso++;
3199 	}
3200 
3201 	while (skb_nr_frags) {
3202 		if (j >= skb_shinfo(skb)->nr_frags) {
3203 			qdf_info("TSO: nr_frags %d j %d",
3204 				 skb_shinfo(skb)->nr_frags, j);
3205 			qdf_assert(0);
3206 			return 0;
3207 		}
3208 		/*
3209 		 * Calculate the number of tso segs for nr_frags data:
3210 		 * get the length of each frag in skb_frag_len, add it to the
3211 		 * remainder, divide by tso_seg_size to get the number of
3212 		 * segments, and compute the new remainder.
3213 		 * Decrement the nr_frags value and keep
3214 		 * looping over all the skb fragments.
3215 		 */
3216 		frag = &skb_shinfo(skb)->frags[j];
3217 		skb_frag_len = skb_frag_size(frag);
3218 		temp_num_seg = num_segs;
3219 		remainder += skb_frag_len;
3220 		num_segs += remainder / tso_seg_size;
3221 		remainder = remainder % tso_seg_size;
3222 		skb_nr_frags--;
3223 		if (remainder) {
3224 			if (num_segs > temp_num_seg)
3225 				frags_per_tso = 0;
3226 			/*
3227 			 * Increment frags_per_tso whenever the remainder is
3228 			 * positive. If frags_per_tso reaches (max - 1)
3229 			 * [the first frag always holds the EIT header, hence
3230 			 * max - 1], increment num_segs as no more data can be
3231 			 * accommodated in the current tso seg. Reset the
3232 			 * remainder and frags_per_tso and keep looping.
3233 			 */
3234 			frags_per_tso++;
3235 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3236 				num_segs++;
3237 				frags_per_tso = 0;
3238 				remainder = 0;
3239 			}
3240 			/*
3241 			 * If this is the last skb frag and the remainder is
3242 			 * still non-zero (frags_per_tso has not reached
3243 			 * max - 1), increment num_segs to take care of the
3244 			 * remaining length.
3245 			 */
3246 			if (!skb_nr_frags && remainder) {
3247 				num_segs++;
3248 				frags_per_tso = 0;
3249 			}
3250 		} else {
3251 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3252 			frags_per_tso = 0;
3253 		}
3254 		j++;
3255 	}
3256 
3257 	return num_segs;
3258 }
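
/*
 * Worked example (illustrative): a linear skb with skb_headlen() = 7354,
 * a 54-byte EIT header, gso_size 1460 and no page frags gives
 * skb_frag_len = 7300 = 5 * 1460, so num_segs = 5 with zero remainder.
 */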
3259 #elif !defined(QCA_WIFI_QCN9000)
3260 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3261 {
3262 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3263 	skb_frag_t *frag = NULL;
3264 
3265 	/*
3266 	 * Check if the head SKB or any of the frags are allocated below
3267 	 * 0x50000040, a region which cannot be accessed by the target
3268 	 */
3269 	if (virt_to_phys(skb->data) < 0x50000040) {
3270 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3271 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3272 				virt_to_phys(skb->data));
3273 		goto fail;
3274 
3275 	}
3276 
3277 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3278 		frag = &skb_shinfo(skb)->frags[i];
3279 
3280 		if (!frag)
3281 			goto fail;
3282 
3283 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3284 			goto fail;
3285 	}
3286 
3287 
3288 	gso_size = skb_shinfo(skb)->gso_size;
3289 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3290 			+ tcp_hdrlen(skb));
3291 	while (tmp_len) {
3292 		num_segs++;
3293 		if (tmp_len > gso_size)
3294 			tmp_len -= gso_size;
3295 		else
3296 			break;
3297 	}
3298 
3299 	return num_segs;
3300 
3301 	/*
3302 	 * Do not free this frame, just do socket level accounting
3303 	 * so that this is not reused.
3304 	 */
3305 fail:
3306 	if (skb->sk)
3307 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3308 
3309 	return 0;
3310 }
3311 #else
3312 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3313 {
3314 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3315 	skb_frag_t *frag = NULL;
3316 
3317 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3318 		frag = &skb_shinfo(skb)->frags[i];
3319 
3320 		if (!frag)
3321 			goto fail;
3322 	}
3323 
3324 	gso_size = skb_shinfo(skb)->gso_size;
3325 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3326 			+ tcp_hdrlen(skb));
3327 	while (tmp_len) {
3328 		num_segs++;
3329 		if (tmp_len > gso_size)
3330 			tmp_len -= gso_size;
3331 		else
3332 			break;
3333 	}
3334 
3335 	return num_segs;
3336 
3337 	/*
3338 	 * Do not free this frame, just do socket level accounting
3339 	 * so that this is not reused.
3340 	 */
3341 fail:
3342 	if (skb->sk)
3343 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3344 
3345 	return 0;
3346 }
3347 #endif
3348 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3349 
3350 #endif /* FEATURE_TSO */
3351 
3352 /**
3353  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
3354  * @dmaaddr: DMA address to split
3355  * @lo: pointer receiving the low 32 bits
3356  * @hi: pointer receiving the high 32 bits
3357  * Return: N/A
3358  */
3359 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3360 			  uint32_t *lo, uint32_t *hi)
3361 {
3362 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3363 		*lo = lower_32_bits(dmaaddr);
3364 		*hi = upper_32_bits(dmaaddr);
3365 	} else {
3366 		*lo = dmaaddr;
3367 		*hi = 0;
3368 	}
3369 }
3370 
3371 qdf_export_symbol(__qdf_dmaaddr_to_32s);
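
/*
 * Worked example (illustrative): for a 64-bit dma_addr_t of 0x123456780,
 * *lo is set to 0x23456780 and *hi to 0x1; with a 32-bit dma_addr_t the
 * whole value lands in *lo and *hi is 0.
 */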
3372 
3373 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3374 {
3375 	qdf_nbuf_users_inc(&skb->users);
3376 	return skb;
3377 }
3378 qdf_export_symbol(__qdf_nbuf_inc_users);
3379 
3380 int __qdf_nbuf_get_users(struct sk_buff *skb)
3381 {
3382 	return qdf_nbuf_users_read(&skb->users);
3383 }
3384 qdf_export_symbol(__qdf_nbuf_get_users);
3385 
3386 /**
3387  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3388  * @skb: sk_buff handle
3389  *
3390  * Return: none
3391  */
3392 
3393 void __qdf_nbuf_ref(struct sk_buff *skb)
3394 {
3395 	skb_get(skb);
3396 }
3397 qdf_export_symbol(__qdf_nbuf_ref);
3398 
3399 /**
3400  * __qdf_nbuf_shared() - Check whether the buffer is shared
3401  *  @skb: sk_buff buffer
3402  *
3403  *  Return: true if more than one person has a reference to this buffer.
3404  */
3405 int __qdf_nbuf_shared(struct sk_buff *skb)
3406 {
3407 	return skb_shared(skb);
3408 }
3409 qdf_export_symbol(__qdf_nbuf_shared);
3410 
3411 /**
3412  * __qdf_nbuf_dmamap_create() - create a DMA map.
3413  * @osdev: qdf device handle
3414  * @dmap: dma map handle
3415  *
3416  * This can later be used to map networking buffers. They:
3417  * - need space in adf_drv's software descriptor
3418  * - are typically created during adf_drv_create
3419  * - need to be created before any API(qdf_nbuf_map) that uses them
3420  *
3421  * Return: QDF STATUS
3422  */
3423 QDF_STATUS
3424 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3425 {
3426 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3427 	/*
3428 	 * driver can tell its SG capablity, it must be handled.
3429 	 * The driver can tell us its SG capability; it must be handled.
3430 	 * Use bounce buffers if they are present.
3431 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3432 	if (!(*dmap))
3433 		error = QDF_STATUS_E_NOMEM;
3434 
3435 	return error;
3436 }
3437 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3438 /**
3439  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3440  * @osdev: qdf device handle
3441  * @dmap: dma map handle
3442  *
3443  * Return: none
3444  */
3445 void
3446 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3447 {
3448 	kfree(dmap);
3449 }
3450 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3451 
3452 /**
3453  * __qdf_nbuf_map_nbytes_single() - map nbytes
3454  * @osdev: os device
3455  * @buf: buffer
3456  * @dir: direction
3457  * @nbytes: number of bytes
3458  *
3459  * Return: QDF_STATUS
3460  */
3461 #ifdef A_SIMOS_DEVHOST
3462 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3463 		qdf_device_t osdev, struct sk_buff *buf,
3464 		 qdf_dma_dir_t dir, int nbytes)
3465 {
3466 	qdf_dma_addr_t paddr;
3467 
3468 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3469 	return QDF_STATUS_SUCCESS;
3470 }
3471 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3472 #else
3473 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3474 		qdf_device_t osdev, struct sk_buff *buf,
3475 		 qdf_dma_dir_t dir, int nbytes)
3476 {
3477 	qdf_dma_addr_t paddr;
3478 
3479 	/* assume that the OS only provides a single fragment */
3480 	QDF_NBUF_CB_PADDR(buf) = paddr =
3481 		dma_map_single(osdev->dev, buf->data,
3482 			nbytes, __qdf_dma_dir_to_os(dir));
3483 	return dma_mapping_error(osdev->dev, paddr) ?
3484 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3485 }
3486 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3487 #endif
3488 /**
3489  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3490  * @osdev: os device
3491  * @buf: buffer
3492  * @dir: direction
3493  * @nbytes: number of bytes
3494  *
3495  * Return: none
3496  */
3497 #if defined(A_SIMOS_DEVHOST)
3498 void
3499 __qdf_nbuf_unmap_nbytes_single(
3500 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3501 {
3502 }
3503 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3504 
3505 #else
3506 void
3507 __qdf_nbuf_unmap_nbytes_single(
3508 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3509 {
3510 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3511 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3512 		return;
3513 	}
3514 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3515 			nbytes, __qdf_dma_dir_to_os(dir));
3516 }
3517 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3518 #endif
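
/*
 * Usage sketch (illustrative only, not built into the driver): every
 * successful map must be paired with an unmap of the same length and
 * direction before the skb is freed. The function name is hypothetical.
 */
#if 0
static void example_map_unmap(qdf_device_t osdev, struct sk_buff *skb)
{
	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
					 skb->len) != QDF_STATUS_SUCCESS)
		return;
	/* ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ... */
	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
				       skb->len);
}
#endif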
3519 /**
3520  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3521  * @osdev: os device
3522  * @skb: skb handle
3523  * @dir: dma direction
3524  * @nbytes: number of bytes to be mapped
3525  *
3526  * Return: QDF_STATUS
3527  */
3528 #ifdef QDF_OS_DEBUG
3529 QDF_STATUS
3530 __qdf_nbuf_map_nbytes(
3531 	qdf_device_t osdev,
3532 	struct sk_buff *skb,
3533 	qdf_dma_dir_t dir,
3534 	int nbytes)
3535 {
3536 	struct skb_shared_info  *sh = skb_shinfo(skb);
3537 
3538 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3539 
3540 	/*
3541 	 * Assume there's only a single fragment.
3542 	 * To support multiple fragments, it would be necessary to change
3543 	 * adf_nbuf_t to be a separate object that stores meta-info
3544 	 * (including the bus address for each fragment) and a pointer
3545 	 * to the underlying sk_buff.
3546 	 */
3547 	qdf_assert(sh->nr_frags == 0);
3548 
3549 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3550 }
3551 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3552 #else
3553 QDF_STATUS
3554 __qdf_nbuf_map_nbytes(
3555 	qdf_device_t osdev,
3556 	struct sk_buff *skb,
3557 	qdf_dma_dir_t dir,
3558 	int nbytes)
3559 {
3560 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3561 }
3562 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3563 #endif
3564 /**
3565  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3566  * @osdev: OS device
3567  * @skb: skb handle
3568  * @dir: direction
3569  * @nbytes: number of bytes
3570  *
3571  * Return: none
3572  */
3573 void
3574 __qdf_nbuf_unmap_nbytes(
3575 	qdf_device_t osdev,
3576 	struct sk_buff *skb,
3577 	qdf_dma_dir_t dir,
3578 	int nbytes)
3579 {
3580 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3581 
3582 	/*
3583 	 * Assume there's a single fragment.
3584 	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will catch it.
3585 	 */
3586 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3587 }
3588 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3589 
3590 /**
3591  * __qdf_nbuf_dma_map_info() - return the dma map info
3592  * @bmap: dma map
3593  * @sg: dma map info
3594  *
3595  * Return: none
3596  */
3597 void
3598 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3599 {
3600 	qdf_assert(bmap->mapped);
3601 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3602 
3603 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3604 			sizeof(struct __qdf_segment));
3605 	sg->nsegs = bmap->nsegs;
3606 }
3607 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3608 /**
3609  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3610  *			specified by the index
3611  * @skb: sk buff
3612  * @sg: scatter/gather list of all the frags
3613  *
3614  * Return: none
3615  */
3616 #if defined(__QDF_SUPPORT_FRAG_MEM)
3617 void
3618 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3619 {
3620 	struct skb_shared_info *sh = skb_shinfo(skb);
3621 	int i;
3622 
3623 	qdf_assert(skb);
3624 	sg->sg_segs[0].vaddr = skb->data;
3625 	sg->sg_segs[0].len   = skb->len;
3626 	sg->nsegs            = 1;
3627 
3628 	for (i = 1; i <= sh->nr_frags; i++) {
3629 		skb_frag_t *f = &sh->frags[i - 1];
3630 		qdf_assert(i < QDF_MAX_SGLIST);
3631 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
3632 					f->page_offset);
3633 		sg->sg_segs[i].len = f->size;
3634 	}
3635 	sg->nsegs += sh->nr_frags;
3636 }
3637 qdf_export_symbol(__qdf_nbuf_frag_info);
3638 #else
3639 #ifdef QDF_OS_DEBUG
3640 void
3641 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3642 {
3643 
3644 	struct skb_shared_info  *sh = skb_shinfo(skb);
3645 
3646 	qdf_assert(skb);
3647 	sg->sg_segs[0].vaddr = skb->data;
3648 	sg->sg_segs[0].len   = skb->len;
3649 	sg->nsegs            = 1;
3650 
3651 	qdf_assert(sh->nr_frags == 0);
3652 }
3653 qdf_export_symbol(__qdf_nbuf_frag_info);
3654 #else
3655 void
3656 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3657 {
3658 	sg->sg_segs[0].vaddr = skb->data;
3659 	sg->sg_segs[0].len   = skb->len;
3660 	sg->nsegs            = 1;
3661 }
3662 qdf_export_symbol(__qdf_nbuf_frag_info);
3663 #endif
3664 #endif
3665 /**
3666  * __qdf_nbuf_get_frag_size() - get frag size
3667  * @nbuf: sk buffer
3668  * @cur_frag: current frag
3669  *
3670  * Return: frag size
3671  */
3672 uint32_t
3673 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3674 {
3675 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3676 	const skb_frag_t *frag = sh->frags + cur_frag;
3677 
3678 	return skb_frag_size(frag);
3679 }
3680 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3681 
3682 /**
3683  * __qdf_nbuf_frag_map() - dma map frag
3684  * @osdev: os device
3685  * @nbuf: sk buff
3686  * @offset: offset
3687  * @dir: direction
3688  * @cur_frag: current fragment
3689  *
3690  * Return: QDF status
3691  */
3692 #ifdef A_SIMOS_DEVHOST
3693 QDF_STATUS __qdf_nbuf_frag_map(
3694 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3695 	int offset, qdf_dma_dir_t dir, int cur_frag)
3696 {
3697 	int32_t paddr, frag_len;
3698 
3699 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3700 	return QDF_STATUS_SUCCESS;
3701 }
3702 qdf_export_symbol(__qdf_nbuf_frag_map);
3703 #else
3704 QDF_STATUS __qdf_nbuf_frag_map(
3705 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3706 	int offset, qdf_dma_dir_t dir, int cur_frag)
3707 {
3708 	dma_addr_t paddr, frag_len;
3709 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3710 	const skb_frag_t *frag = sh->frags + cur_frag;
3711 
3712 	frag_len = skb_frag_size(frag);
3713 
3714 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3715 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3716 					__qdf_dma_dir_to_os(dir));
3717 	return dma_mapping_error(osdev->dev, paddr) ?
3718 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3719 }
3720 qdf_export_symbol(__qdf_nbuf_frag_map);
3721 #endif
3722 /**
3723  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3724  * @dmap: dma map
3725  * @cb: callback
3726  * @arg: argument
3727  *
3728  * Return: none
3729  */
3730 void
3731 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3732 {
3733 	return;
3734 }
3735 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3736 
3737 
3738 /**
3739  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3740  * @osdev: os device
3741  * @buf: sk buff
3742  * @dir: direction
3743  *
3744  * Return: none
3745  */
3746 #if defined(A_SIMOS_DEVHOST)
3747 static void __qdf_nbuf_sync_single_for_cpu(
3748 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3749 {
3750 	return;
3751 }
3752 #else
3753 static void __qdf_nbuf_sync_single_for_cpu(
3754 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3755 {
3756 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3757 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3758 		return;
3759 	}
3760 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3761 		skb_end_offset(buf) - skb_headroom(buf),
3762 		__qdf_dma_dir_to_os(dir));
3763 }
3764 #endif
3765 /**
3766  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3767  * @osdev: os device
3768  * @skb: sk buff
3769  * @dir: direction
3770  *
3771  * Return: none
3772  */
3773 void
3774 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3775 	struct sk_buff *skb, qdf_dma_dir_t dir)
3776 {
3777 	qdf_assert(
3778 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3779 
3780 	/*
3781 	 * Assume there's a single fragment.
3782 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3783 	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will catch it.
3784 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3785 }
3786 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3787 
3788 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3789 /**
3790  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3791  * @rx_status: Pointer to rx_status.
3792  * @rtap_buf: Buf to which VHT info has to be updated.
3793  * @rtap_len: Current length of radiotap buffer
3794  *
3795  * Return: Length of radiotap after VHT flags updated.
3796  */
3797 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3798 					struct mon_rx_status *rx_status,
3799 					int8_t *rtap_buf,
3800 					uint32_t rtap_len)
3801 {
3802 	uint16_t vht_flags = 0;
3803 
3804 	rtap_len = qdf_align(rtap_len, 2);
3805 
3806 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3807 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3808 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3809 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3810 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3811 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3812 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3813 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3814 	rtap_len += 2;
3815 
3816 	rtap_buf[rtap_len] |=
3817 		(rx_status->is_stbc ?
3818 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3819 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3820 		(rx_status->ldpc ?
3821 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3822 		(rx_status->beamformed ?
3823 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3824 	rtap_len += 1;
3825 	switch (rx_status->vht_flag_values2) {
3826 	case IEEE80211_RADIOTAP_VHT_BW_20:
3827 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3828 		break;
3829 	case IEEE80211_RADIOTAP_VHT_BW_40:
3830 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3831 		break;
3832 	case IEEE80211_RADIOTAP_VHT_BW_80:
3833 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3834 		break;
3835 	case IEEE80211_RADIOTAP_VHT_BW_160:
3836 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3837 		break;
3838 	}
3839 	rtap_len += 1;
3840 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3841 	rtap_len += 1;
3842 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3843 	rtap_len += 1;
3844 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3845 	rtap_len += 1;
3846 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3847 	rtap_len += 1;
3848 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3849 	rtap_len += 1;
3850 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3851 	rtap_len += 1;
3852 	put_unaligned_le16(rx_status->vht_flag_values6,
3853 			   &rtap_buf[rtap_len]);
3854 	rtap_len += 2;
3855 
3856 	return rtap_len;
3857 }
3858 
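/*
 * For reference, the 12-byte IEEE80211_RADIOTAP_VHT field emitted above
 * is laid out (after 2-byte alignment) as:
 *
 *	known(le16) | flags(u8) | bandwidth(u8) | mcs_nss(u8[4]) |
 *	coding(u8) | group_id(u8) | partial_aid(le16)
 */
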
3859 /**
3860  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header HE flags
3861  * @rx_status: Pointer to rx_status.
3862  * @rtap_buf: buffer to which radiotap has to be updated
3863  * @rtap_len: radiotap length
3864  *
3865  * This API updates the high-efficiency (11ax) fields in the radiotap header.
3866  *
3867  * Return: length of rtap_len updated.
3868  */
3869 static unsigned int
3870 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3871 				     int8_t *rtap_buf, uint32_t rtap_len)
3872 {
3873 	/*
3874 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3875 	 * Enable all "known" HE radiotap flags for now
3876 	 */
3877 	rtap_len = qdf_align(rtap_len, 2);
3878 
3879 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3880 	rtap_len += 2;
3881 
3882 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3883 	rtap_len += 2;
3884 
3885 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3886 	rtap_len += 2;
3887 
3888 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3889 	rtap_len += 2;
3890 
3891 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3892 	rtap_len += 2;
3893 
3894 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3895 	rtap_len += 2;
3896 	qdf_debug("he data %x %x %x %x %x %x",
3897 		  rx_status->he_data1,
3898 		  rx_status->he_data2, rx_status->he_data3,
3899 		  rx_status->he_data4, rx_status->he_data5,
3900 		  rx_status->he_data6);
3901 	return rtap_len;
3902 }
3903 
3904 
3905 /**
3906  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3907  * @rx_status: Pointer to rx_status.
3908  * @rtap_buf: buffer to which radiotap has to be updated
3909  * @rtap_len: radiotap length
3910  *
3911  * This API updates the HE-MU fields in the radiotap header.
3912  *
3913  * Return: length of rtap_len updated.
3914  */
3915 static unsigned int
3916 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3917 				     int8_t *rtap_buf, uint32_t rtap_len)
3918 {
3919 	rtap_len = qdf_align(rtap_len, 2);
3920 
3921 	/*
3922 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3923 	 * Enable all "known" he-mu radiotap flags for now
3924 	 */
3925 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3926 	rtap_len += 2;
3927 
3928 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3929 	rtap_len += 2;
3930 
3931 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3932 	rtap_len += 1;
3933 
3934 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3935 	rtap_len += 1;
3936 
3937 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3938 	rtap_len += 1;
3939 
3940 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3941 	rtap_len += 1;
3942 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3943 		  rx_status->he_flags1,
3944 		  rx_status->he_flags2, rx_status->he_RU[0],
3945 		  rx_status->he_RU[1], rx_status->he_RU[2],
3946 		  rx_status->he_RU[3]);
3947 
3948 	return rtap_len;
3949 }
3950 
3951 /**
3952  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3953  * @rx_status: Pointer to rx_status.
3954  * @rtap_buf: buffer to which radiotap has to be updated
3955  * @rtap_len: radiotap length
3956  *
3957  * This API updates the HE-MU-other fields in the radiotap header.
3958  *
3959  * Return: length of rtap_len updated.
3960  */
3961 static unsigned int
3962 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3963 				     int8_t *rtap_buf, uint32_t rtap_len)
3964 {
3965 	rtap_len = qdf_align(rtap_len, 2);
3966 
3967 	/*
3968 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3969 	 * Enable all "known" he-mu-other radiotap flags for now
3970 	 */
3971 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3972 	rtap_len += 2;
3973 
3974 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3975 	rtap_len += 2;
3976 
3977 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3978 	rtap_len += 1;
3979 
3980 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3981 	rtap_len += 1;
3982 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3983 		  rx_status->he_per_user_1,
3984 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3985 		  rx_status->he_per_user_known);
3986 	return rtap_len;
3987 }
3988 
3989 
3990 /*
3991  * Maximum radiotap header length: the combined length (the mandatory
3992  * struct ieee80211_radiotap_header plus RADIOTAP_HEADER_LEN) cannot
3993  * exceed the available headroom_sz.
3994  * Increase RADIOTAP_HEADER_LEN when more radiotap elements are added.
3995  * The number after '+' indicates the maximum increase due to alignment.
3996  */
3997 
3998 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3999 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4000 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4001 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4002 #define RADIOTAP_FIXED_HEADER_LEN 17
4003 #define RADIOTAP_HT_FLAGS_LEN 3
4004 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4005 #define RADIOTAP_VENDOR_NS_LEN \
4006 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4007 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4008 				RADIOTAP_FIXED_HEADER_LEN + \
4009 				RADIOTAP_HT_FLAGS_LEN + \
4010 				RADIOTAP_VHT_FLAGS_LEN + \
4011 				RADIOTAP_AMPDU_STATUS_LEN + \
4012 				RADIOTAP_HE_FLAGS_LEN + \
4013 				RADIOTAP_HE_MU_FLAGS_LEN + \
4014 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4015 				RADIOTAP_VENDOR_NS_LEN)
4016 
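/*
 * Worst-case arithmetic behind RADIOTAP_HEADER_LEN (a sketch; the 8-byte
 * struct ieee80211_radiotap_header of <net/ieee80211_radiotap.h> is
 * assumed):
 *
 *	8 (header) + 17 (fixed) + 3 (HT) + 13 (VHT) + 11 (A-MPDU) +
 *	13 (HE) + 9 (HE-MU) + 19 (HE-MU-other) + vendor ns entry
 *
 * so the on-stack rtap_buf in qdf_nbuf_update_radiotap() can always hold
 * every optional element at its aligned worst case.
 */
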
4017 #define IEEE80211_RADIOTAP_HE 23
4018 #define IEEE80211_RADIOTAP_HE_MU	24
4019 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4020 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4021 
4022 /**
4023  * radiotap_num_to_freq() - Get frequency from chan number
4024  * @chan_num: Input channel number
4025  *
4026  * Return: Channel frequency in MHz
4027  */
4028 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
4029 {
4030 	if (chan_num == CHANNEL_NUM_14)
4031 		return CHANNEL_FREQ_2484;
4032 	if (chan_num < CHANNEL_NUM_14)
4033 		return CHANNEL_FREQ_2407 +
4034 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4035 
4036 	if (chan_num < CHANNEL_NUM_27)
4037 		return CHANNEL_FREQ_2512 +
4038 			((chan_num - CHANNEL_NUM_15) *
4039 			 FREQ_MULTIPLIER_CONST_20MHZ);
4040 
4041 	if (chan_num > CHANNEL_NUM_182 &&
4042 			chan_num < CHANNEL_NUM_197)
4043 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4044 			CHANNEL_FREQ_4000);
4045 
4046 	return CHANNEL_FREQ_5000 +
4047 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4048 }
4049 
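/*
 * Worked examples of the mapping above: channel 1 -> 2407 + 1 * 5 =
 * 2412 MHz, channel 14 -> 2484 MHz, channel 36 -> 5000 + 36 * 5 =
 * 5180 MHz, and channel 184 -> 4000 + 184 * 5 = 4920 MHz.
 */
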
4050 /**
4051  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4052  * @rx_status: Pointer to rx_status.
4053  * @rtap_buf: Buf to which AMPDU info has to be updated.
4054  * @rtap_len: Current length of radiotap buffer
4055  *
4056  * Return: Length of radiotap after AMPDU flags updated.
4057  */
4058 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4059 					struct mon_rx_status *rx_status,
4060 					uint8_t *rtap_buf,
4061 					uint32_t rtap_len)
4062 {
4063 	/*
4064 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4065 	 * First 32 bits of AMPDU represents the reference number
4066 	 */
4067 
4068 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4069 	uint16_t ampdu_flags = 0;
4070 	uint16_t ampdu_reserved_flags = 0;
4071 
4072 	rtap_len = qdf_align(rtap_len, 4);
4073 
4074 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4075 	rtap_len += 4;
4076 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4077 	rtap_len += 2;
4078 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4079 	rtap_len += 2;
4080 
4081 	return rtap_len;
4082 }
4083 
4084 /**
4085  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4086  * @rx_status: Pointer to rx_status.
4087  * @nbuf:      nbuf pointer to which radiotap has to be updated
4088  * @headroom_sz: Available headroom size.
4089  *
4090  * Return: length of rtap_len updated.
4091  */
4092 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4093 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4094 {
4095 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4096 	struct ieee80211_radiotap_header *rthdr =
4097 		(struct ieee80211_radiotap_header *)rtap_buf;
4098 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4099 	uint32_t rtap_len = rtap_hdr_len;
4100 	uint8_t length = rtap_len;
4101 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4102 
4103 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4104 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4105 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4106 	rtap_len += 8;
4107 
4108 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4109 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4110 
4111 	if (rx_status->rs_fcs_err)
4112 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4113 
4114 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4115 	rtap_len += 1;
4116 
4117 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4118 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4119 	    !rx_status->he_flags) {
4120 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4121 		rtap_buf[rtap_len] = rx_status->rate;
4122 	} else
4123 		rtap_buf[rtap_len] = 0;
4124 	rtap_len += 1;
4125 
4126 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4127 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4128 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4129 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4130 	rtap_len += 2;
4131 	/* Channel flags. */
4132 	if (rx_status->chan_num > CHANNEL_NUM_35)
4133 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4134 	else
4135 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4136 	if (rx_status->cck_flag)
4137 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4138 	if (rx_status->ofdm_flag)
4139 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4140 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4141 	rtap_len += 2;
4142 
4143 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4144 	 *					(dBm)
4145 	 */
4146 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4147 	/*
4148 	 * rssi_comb is in dB relative to the noise floor; convert it to
4149 	 * dBm by adding the channel noise floor (typically -96 dBm).
4150 	 */
4151 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4152 	rtap_len += 1;
4153 
4154 	/* RX signal noise floor */
4155 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4156 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4157 	rtap_len += 1;
4158 
4159 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4160 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4161 	rtap_buf[rtap_len] = rx_status->nr_ant;
4162 	rtap_len += 1;
4163 
4164 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4165 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4166 		return 0;
4167 	}
4168 
4169 	if (rx_status->ht_flags) {
4170 		length = rtap_len;
4171 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4172 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4173 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4174 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4175 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4176 		rtap_len += 1;
4177 
4178 		if (rx_status->sgi)
4179 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4180 		if (rx_status->bw)
4181 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4182 		else
4183 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4184 		rtap_len += 1;
4185 
4186 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4187 		rtap_len += 1;
4188 
4189 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4190 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4191 			return 0;
4192 		}
4193 	}
4194 
4195 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4196 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4197 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4198 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4199 								rtap_buf,
4200 								rtap_len);
4201 	}
4202 
4203 	if (rx_status->vht_flags) {
4204 		length = rtap_len;
4205 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4206 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4207 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4208 								rtap_buf,
4209 								rtap_len);
4210 
4211 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4212 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4213 			return 0;
4214 		}
4215 	}
4216 
4217 	if (rx_status->he_flags) {
4218 		length = rtap_len;
4219 		/* IEEE80211_RADIOTAP_HE */
4220 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4221 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4222 								rtap_buf,
4223 								rtap_len);
4224 
4225 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4226 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4227 			return 0;
4228 		}
4229 	}
4230 
4231 	if (rx_status->he_mu_flags) {
4232 		length = rtap_len;
4233 		/* IEEE80211_RADIOTAP_HE-MU */
4234 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4235 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4236 								rtap_buf,
4237 								rtap_len);
4238 
4239 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4240 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4241 			return 0;
4242 		}
4243 	}
4244 
4245 	if (rx_status->he_mu_other_flags) {
4246 		length = rtap_len;
4247 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4248 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4249 		rtap_len =
4250 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4251 								rtap_buf,
4252 								rtap_len);
4253 
4254 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4255 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4256 			return 0;
4257 		}
4258 	}
4259 
4260 	rtap_len = qdf_align(rtap_len, 2);
4261 	/*
4262 	 * Radiotap Vendor Namespace
4263 	 */
4264 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4265 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4266 					(rtap_buf + rtap_len);
4267 	/*
4268 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4269 	 */
4270 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4271 	/*
4272 	 * Name space selector = 0
4273 	 * We only will have one namespace for now
4274 	 */
4275 	radiotap_vendor_ns_ath->hdr.selector = 0;
4276 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4277 					sizeof(*radiotap_vendor_ns_ath) -
4278 					sizeof(radiotap_vendor_ns_ath->hdr));
4279 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4280 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4281 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4282 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4283 				cpu_to_le32(rx_status->ppdu_timestamp);
4284 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4285 
4286 	rthdr->it_len = cpu_to_le16(rtap_len);
4287 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4288 
4289 	if (headroom_sz < rtap_len) {
4290 		qdf_err("ERROR: not enough space to update radiotap");
4291 		return 0;
4292 	}
4293 	qdf_nbuf_push_head(nbuf, rtap_len);
4294 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4295 	return rtap_len;
4296 }
4297 #else
4298 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4299 					struct mon_rx_status *rx_status,
4300 					int8_t *rtap_buf,
4301 					uint32_t rtap_len)
4302 {
4303 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4304 	return 0;
4305 }
4306 
4307 static unsigned int qdf_nbuf_update_radiotap_he_flags(
4308 	struct mon_rx_status *rx_status, int8_t *rtap_buf, uint32_t rtap_len)
4309 {
4310 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4311 	return 0;
4312 }
4313 
4314 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4315 					struct mon_rx_status *rx_status,
4316 					uint8_t *rtap_buf,
4317 					uint32_t rtap_len)
4318 {
4319 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4320 	return 0;
4321 }
4322 
4323 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4324 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4325 {
4326 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4327 	return 0;
4328 }
4329 #endif
4330 qdf_export_symbol(qdf_nbuf_update_radiotap);
4331 
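/*
 * Usage sketch (illustrative; "mon_status" is a hypothetical, fully
 * populated struct mon_rx_status for the received PPDU):
 *
 *	if (!qdf_nbuf_update_radiotap(&mon_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf)))
 *		qdf_nbuf_free(nbuf);  // headroom too small, drop the frame
 *
 * On success, the radiotap header has been pushed in front of the frame
 * data and the returned length matches rthdr->it_len.
 */
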
4332 /**
4333  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4334  * @cb_func_ptr: function pointer to the nbuf free callback
4335  *
4336  * This function registers a callback function for nbuf free.
4337  *
4338  * Return: none
4339  */
4340 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4341 {
4342 	nbuf_free_cb = cb_func_ptr;
4343 }
4344 
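/*
 * Usage sketch (illustrative; "dp_nbuf_free_notify" is a hypothetical
 * hook matching the qdf_nbuf_free_t signature):
 *
 *	static void dp_nbuf_free_notify(qdf_nbuf_t nbuf)
 *	{
 *		// account for the buffer before it is returned to the pool
 *	}
 *
 *	__qdf_nbuf_reg_free_cb(dp_nbuf_free_notify);
 */
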
4345 /**
4346  * qdf_nbuf_classify_pkt() - classify packet
4347  * @skb: sk buff
4348  *
4349  * Return: none
4350  */
4351 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4352 {
4353 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4354 
4355 	/* check destination mac address is broadcast/multicast */
4356 	if (is_broadcast_ether_addr(eh->h_dest))
4357 		QDF_NBUF_CB_SET_BCAST(skb);
4358 	else if (is_multicast_ether_addr(eh->h_dest))
4359 		QDF_NBUF_CB_SET_MCAST(skb);
4360 
4361 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4362 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4363 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4364 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4365 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4366 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4367 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4368 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4369 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4370 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4371 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4372 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4373 }
4374 qdf_export_symbol(qdf_nbuf_classify_pkt);
4375 
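/*
 * Usage sketch (illustrative): classify once on the TX entry path and
 * branch on the cached result instead of re-parsing the headers.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		; // e.g. steer EAPOL frames to a high-priority queue
 */
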
4376 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4377 {
4378 	qdf_nbuf_users_set(&nbuf->users, 1);
4379 	nbuf->data = nbuf->head + NET_SKB_PAD;
4380 	skb_reset_tail_pointer(nbuf);
4381 }
4382 qdf_export_symbol(__qdf_nbuf_init);
4383 
4384 #ifdef WLAN_FEATURE_FASTPATH
4385 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4386 {
4387 	qdf_nbuf_users_set(&nbuf->users, 1);
4388 	nbuf->data = nbuf->head + NET_SKB_PAD;
4389 	skb_reset_tail_pointer(nbuf);
4390 }
4391 qdf_export_symbol(qdf_nbuf_init_fast);
4392 #endif /* WLAN_FEATURE_FASTPATH */
4393 
4394 
4395 #ifdef QDF_NBUF_GLOBAL_COUNT
4396 /**
4397  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4398  *
4399  * Return: void
4400  */
4401 void __qdf_nbuf_mod_init(void)
4402 {
4403 	qdf_atomic_init(&nbuf_count);
4404 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4405 }
4406 
4407 /**
4408  * __qdf_nbuf_mod_exit() - Cleanup routine for qdf_nbuf
4409  *
4410  * Return: void
4411  */
4412 void __qdf_nbuf_mod_exit(void)
4413 {
4414 }
4415 #endif
4416