1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_types.h>
32 #include <qdf_nbuf.h>
33 #include "qdf_flex_mem.h"
34 #include <qdf_mem.h>
35 #include <qdf_status.h>
36 #include <qdf_lock.h>
37 #include <qdf_trace.h>
38 #include <qdf_debugfs.h>
39 #include <net/ieee80211_radiotap.h>
40 #include <qdf_module.h>
42 #include <pld_common.h>
44 #include "qdf_str.h"
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
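
/*
 * Illustrative sketch (not part of the driver): the wrappers above let
 * callers manipulate skb->users without caring that it is an atomic_t
 * before kernel 4.13 and a refcount_t afterwards. A hypothetical helper:
 */
#if 0
static void example_nbuf_hold(struct sk_buff *skb)
{
	/* take an extra reference on the nbuf's user count */
	qdf_nbuf_users_inc(&skb->users);
}
#endif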
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* channel number to freq conversion */
78 #define CHANNEL_NUM_14 14
79 #define CHANNEL_NUM_15 15
80 #define CHANNEL_NUM_27 27
81 #define CHANNEL_NUM_35 35
82 #define CHANNEL_NUM_182 182
83 #define CHANNEL_NUM_197 197
84 #define CHANNEL_FREQ_2484 2484
85 #define CHANNEL_FREQ_2407 2407
86 #define CHANNEL_FREQ_2512 2512
87 #define CHANNEL_FREQ_5000 5000
88 #define CHANNEL_FREQ_4000 4000
89 #define FREQ_MULTIPLIER_CONST_5MHZ 5
90 #define FREQ_MULTIPLIER_CONST_20MHZ 20
91 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
92 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
93 #define RADIOTAP_CCK_CHANNEL 0x0020
94 #define RADIOTAP_OFDM_CHANNEL 0x0040
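
/*
 * Illustrative sketch (assumption, mirroring the radiotap conversion these
 * constants support): a channel number typically maps to a center frequency
 * as freq = base + chan * 5 MHz, with channel 14 special-cased. The in-tree
 * conversion later in this file handles additional channel ranges.
 */
#if 0
static uint16_t example_chan_to_freq(uint32_t chan)
{
	if (chan == CHANNEL_NUM_14)
		return CHANNEL_FREQ_2484;		/* 2.4 GHz, channel 14 */
	if (chan < CHANNEL_NUM_14)
		return CHANNEL_FREQ_2407 +
			chan * FREQ_MULTIPLIER_CONST_5MHZ; /* 2.4 GHz band */
	if (chan >= CHANNEL_NUM_182 && chan <= CHANNEL_NUM_197)
		return CHANNEL_FREQ_4000 +
			chan * FREQ_MULTIPLIER_CONST_5MHZ; /* 4.9 GHz band */
	return CHANNEL_FREQ_5000 +
		chan * FREQ_MULTIPLIER_CONST_5MHZ;	/* 5 GHz band */
}
#endif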
95 
96 #ifdef CONFIG_MCL
97 #include <qdf_mc_timer.h>
98 
99 struct qdf_track_timer {
100 	qdf_mc_timer_t track_timer;
101 	qdf_atomic_t alloc_fail_cnt;
102 };
103 
104 static struct qdf_track_timer alloc_track_timer;
105 
106 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
107 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
108 #endif
109 
110 /* Packet Counter */
111 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
112 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
113 #ifdef QDF_NBUF_GLOBAL_COUNT
114 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
115 static qdf_atomic_t nbuf_count;
116 #endif
117 
/**
 * qdf_nbuf_tx_desc_count_display() - Displays the packet counters
 *
 * Each counter is incremented as a packet reaches the corresponding layer,
 * so the difference between two layers' counters is the number of packets
 * currently in flight between those layers.
 *
 * Return: none
 */
123 void qdf_nbuf_tx_desc_count_display(void)
124 {
125 	qdf_debug("Current Snapshot of the Driver:");
126 	qdf_debug("Data Packets:");
127 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
129 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
138 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
146 	qdf_debug("Mgmt Packets:");
147 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
161 }
162 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
163 
164 /**
165  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
168  *
169  * Return: none
170  */
171 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
172 			uint8_t current_state)
173 {
174 	switch (packet_type) {
175 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
176 		nbuf_tx_mgmt[current_state]++;
177 		break;
178 	case QDF_NBUF_TX_PKT_DATA_TRACK:
179 		nbuf_tx_data[current_state]++;
180 		break;
181 	default:
182 		break;
183 	}
184 }
185 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
186 
187 /**
 * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
189  *
190  * Return: none
191  */
192 void qdf_nbuf_tx_desc_count_clear(void)
193 {
194 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
195 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
196 }
197 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
198 
199 /**
200  * qdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state: layer at which the packet currently is
203  *
204  * This function updates the packet state to the layer at which the packet
205  * currently is
206  *
207  * Return: none
208  */
209 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
210 {
211 	/*
212 	 * Only Mgmt, Data Packets are tracked. WMI messages
213 	 * such as scan commands are not tracked
214 	 */
215 	uint8_t packet_type;
216 
217 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
218 
219 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
220 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
221 		return;
222 	}
223 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
224 	qdf_nbuf_tx_desc_count_update(packet_type,
225 					current_state);
226 }
227 qdf_export_symbol(qdf_nbuf_set_state);
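
/*
 * Usage sketch (hypothetical call site): a layer tags a data frame as it
 * hands it down, so the counters above can localize a stuck packet.
 */
#if 0
	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
#endif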
228 
229 #ifdef CONFIG_MCL
230 /**
 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
232  *
233  * This function starts the alloc fail replenish timer.
234  *
235  * Return: void
236  */
237 static void __qdf_nbuf_start_replenish_timer(void)
238 {
239 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
240 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
241 	    QDF_TIMER_STATE_RUNNING)
242 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
243 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
244 }
245 
246 /**
 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
248  *
249  * This function stops the alloc fail replenish timer.
250  *
251  * Return: void
252  */
253 static void __qdf_nbuf_stop_replenish_timer(void)
254 {
255 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
256 		return;
257 
258 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
259 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
260 	    QDF_TIMER_STATE_RUNNING)
261 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
262 }
263 
264 /**
 * qdf_replenish_expire_handler() - Replenish expire handler
 *
 * This function runs when the alloc fail replenish timer expires.
268  *
269  * Return: void
270  */
271 static void qdf_replenish_expire_handler(void *arg)
272 {
273 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
274 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired, fail count %d",
276 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
277 
278 		/* Error handling here */
279 	}
280 }
281 
282 /**
 * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
284  *
285  * This function initializes the nbuf alloc fail replenish timer.
286  *
287  * Return: void
288  */
289 void __qdf_nbuf_init_replenish_timer(void)
290 {
291 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
292 			  qdf_replenish_expire_handler, NULL);
293 }
294 
295 /**
 * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
297  *
298  * This function deinitializes the nbuf alloc fail replenish timer.
299  *
300  * Return: void
301  */
302 void __qdf_nbuf_deinit_replenish_timer(void)
303 {
304 	__qdf_nbuf_stop_replenish_timer();
305 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
306 }
307 #else
308 
309 static inline void __qdf_nbuf_start_replenish_timer(void) {}
310 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
311 #endif
312 
313 /* globals do not need to be initialized to NULL/0 */
314 qdf_nbuf_trace_update_t qdf_trace_update_cb;
315 qdf_nbuf_free_t nbuf_free_cb;
316 
317 #ifdef QDF_NBUF_GLOBAL_COUNT
318 
319 /**
320  * __qdf_nbuf_count_get() - get nbuf global count
321  *
322  * Return: nbuf global count
323  */
324 int __qdf_nbuf_count_get(void)
325 {
326 	return qdf_atomic_read(&nbuf_count);
327 }
328 qdf_export_symbol(__qdf_nbuf_count_get);
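
/*
 * Usage sketch (hypothetical): a teardown path can consult the live count
 * to flag leaked nbufs before the driver unloads.
 */
#if 0
	if (__qdf_nbuf_count_get())
		qdf_err("%d nbuf(s) still outstanding at unload",
			__qdf_nbuf_count_get());
#endif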
329 
330 /**
331  * __qdf_nbuf_count_inc() - increment nbuf global count
332  *
 * @nbuf: sk buff
334  *
335  * Return: void
336  */
337 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
338 {
339 	qdf_atomic_inc(&nbuf_count);
340 }
341 qdf_export_symbol(__qdf_nbuf_count_inc);
342 
343 /**
344  * __qdf_nbuf_count_dec() - decrement nbuf global count
345  *
 * @nbuf: sk buff
347  *
348  * Return: void
349  */
350 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
351 {
352 	qdf_atomic_dec(&nbuf_count);
353 }
354 qdf_export_symbol(__qdf_nbuf_count_dec);
355 #endif
356 
#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
358 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
359 				 int align, int prio, const char *func,
360 				 uint32_t line)
361 {
362 	struct sk_buff *skb;
363 	unsigned long offset;
364 	uint32_t lowmem_alloc_tries = 0;
365 
366 	if (align)
367 		size += (align - 1);
368 
369 realloc:
370 	skb = dev_alloc_skb(size);
371 
372 	if (skb)
373 		goto skb_alloc;
374 
375 	skb = pld_nbuf_pre_alloc(size);
376 
377 	if (!skb) {
378 		qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
379 			     size, func, line);
380 		return NULL;
381 	}
382 
383 skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below
	 * 0x50000040. Though we try to reserve low memory up front to
	 * prevent this, we sometimes see SKBs allocated from low memory.
	 */
388 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
389 		lowmem_alloc_tries++;
390 		if (lowmem_alloc_tries > 100) {
391 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
392 				     size, func, line);
393 			return NULL;
394 		} else {
			/* Deliberately not freeing the skb, so that this
			 * low-memory region is not handed back to us on
			 * the retry.
			 */
398 			goto realloc;
399 		}
400 	}
401 	memset(skb->cb, 0x0, sizeof(skb->cb));
402 
403 	/*
404 	 * The default is for netbuf fragments to be interpreted
405 	 * as wordstreams rather than bytestreams.
406 	 */
407 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
408 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
409 
	/*
	 * XXX: how about we reserve first, then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */
414 
415 	if (align) {
416 		offset = ((unsigned long)skb->data) % align;
417 		if (offset)
418 			skb_reserve(skb, align - offset);
419 	}
420 
	/*
	 * NOTE: alloc doesn't take responsibility if the reserve unaligns
	 * the data pointer.
	 */
425 	skb_reserve(skb, reserve);
426 	qdf_nbuf_count_inc(skb);
427 
428 	return skb;
429 }
430 #else
431 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
432 				 int align, int prio, const char *func,
433 				 uint32_t line)
434 {
435 	struct sk_buff *skb;
436 	unsigned long offset;
437 	int flags = GFP_KERNEL;
438 
439 	if (align)
440 		size += (align - 1);
441 
442 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
443 		flags = GFP_ATOMIC;
444 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns CPU trying to assemble
		 * order-3 pages. __netdev_alloc_skb() falls back to 4k
		 * pages when a high order allocation fails, so we don't
		 * need to try hard here. Let kcompactd rest in peace.
		 */
451 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
452 #endif
453 	}
454 
455 	skb = __netdev_alloc_skb(NULL, size, flags);
456 
457 	if (skb)
458 		goto skb_alloc;
459 
460 	skb = pld_nbuf_pre_alloc(size);
461 
462 	if (!skb) {
463 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
464 				size, func, line);
465 		__qdf_nbuf_start_replenish_timer();
466 		return NULL;
467 	} else {
468 		__qdf_nbuf_stop_replenish_timer();
469 	}
470 
471 skb_alloc:
472 	memset(skb->cb, 0x0, sizeof(skb->cb));
473 
474 	/*
475 	 * The default is for netbuf fragments to be interpreted
476 	 * as wordstreams rather than bytestreams.
477 	 */
478 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
479 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
480 
	/*
	 * XXX: how about we reserve first, then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */
485 
486 	if (align) {
487 		offset = ((unsigned long)skb->data) % align;
488 		if (offset)
489 			skb_reserve(skb, align - offset);
490 	}
491 
	/*
	 * NOTE: alloc doesn't take responsibility if the reserve unaligns
	 * the data pointer.
	 */
496 	skb_reserve(skb, reserve);
497 	qdf_nbuf_count_inc(skb);
498 
499 	return skb;
500 }
501 #endif
502 qdf_export_symbol(__qdf_nbuf_alloc);
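
/*
 * Usage sketch (hypothetical call site), via the public qdf_nbuf_alloc()
 * wrapper: allocate a 2KB buffer, 4-byte aligned, with no headroom
 * reserved. The wrapper signature is assumed to match qdf_nbuf.h.
 */
#if 0
	qdf_nbuf_t nbuf = qdf_nbuf_alloc(osdev, 2048, 0, 4, false);

	if (!nbuf)
		return QDF_STATUS_E_NOMEM;
#endif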
503 
/**
 * __qdf_nbuf_free() - free the nbuf (interrupt safe)
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
510 
511 #ifdef CONFIG_MCL
512 void __qdf_nbuf_free(struct sk_buff *skb)
513 {
514 	if (pld_nbuf_pre_alloc_free(skb))
515 		return;
516 
517 	qdf_nbuf_count_dec(skb);
518 	if (nbuf_free_cb)
519 		nbuf_free_cb(skb);
520 	else
521 		dev_kfree_skb_any(skb);
522 }
523 #else
524 void __qdf_nbuf_free(struct sk_buff *skb)
525 {
526 	if (pld_nbuf_pre_alloc_free(skb))
527 		return;
528 
529 	qdf_nbuf_count_dec(skb);
530 	dev_kfree_skb_any(skb);
531 }
532 #endif
533 
534 qdf_export_symbol(__qdf_nbuf_free);
535 
536 #ifdef NBUF_MEMORY_DEBUG
537 enum qdf_nbuf_event_type {
538 	QDF_NBUF_ALLOC,
539 	QDF_NBUF_FREE,
540 	QDF_NBUF_MAP,
541 	QDF_NBUF_UNMAP,
542 };
543 
544 struct qdf_nbuf_event {
545 	qdf_nbuf_t nbuf;
546 	char file[QDF_MEM_FILE_NAME_SIZE];
547 	uint32_t line;
548 	enum qdf_nbuf_event_type type;
549 	uint64_t timestamp;
550 };
551 
552 #define QDF_NBUF_HISTORY_SIZE 4096
553 static qdf_atomic_t qdf_nbuf_history_index;
554 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
555 
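/*
 * qdf_nbuf_circular_index_next() - atomically advance a circular index.
 * The counter may briefly run past @size; the thread that sees exactly
 * @size pulls it back down by @size so it never grows without bound, and
 * the modulo keeps every returned slot inside [0, size).
 */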
556 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
557 {
558 	int32_t next = qdf_atomic_inc_return(index);
559 
560 	if (next == size)
561 		qdf_atomic_sub(size, index);
562 
563 	return next % size;
564 }
565 
566 static void
567 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
568 		     enum qdf_nbuf_event_type type)
569 {
570 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
571 						   QDF_NBUF_HISTORY_SIZE);
572 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
573 
574 	event->nbuf = nbuf;
575 	qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
576 	event->line = line;
577 	event->type = type;
578 	event->timestamp = qdf_get_log_timestamp();
579 }
580 #endif /* NBUF_MEMORY_DEBUG */
581 
582 #ifdef NBUF_MAP_UNMAP_DEBUG
583 struct qdf_nbuf_map_metadata {
584 	struct hlist_node node;
585 	qdf_nbuf_t nbuf;
586 	char file[QDF_MEM_FILE_NAME_SIZE];
587 	uint32_t line;
588 };
589 
590 DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
591 			 sizeof(struct qdf_nbuf_map_metadata), 0);
592 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
593 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
594 static qdf_spinlock_t qdf_nbuf_map_lock;
595 
596 static void qdf_nbuf_map_tracking_init(void)
597 {
598 	qdf_flex_mem_init(&qdf_nbuf_map_pool);
599 	hash_init(qdf_nbuf_map_ht);
600 	qdf_spinlock_create(&qdf_nbuf_map_lock);
601 }
602 
603 void qdf_nbuf_map_check_for_leaks(void)
604 {
605 	struct qdf_nbuf_map_metadata *meta;
606 	int bucket;
607 	uint32_t count = 0;
608 	bool is_empty;
609 
610 	qdf_flex_mem_release(&qdf_nbuf_map_pool);
611 
612 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
613 	is_empty = hash_empty(qdf_nbuf_map_ht);
614 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
615 
616 	if (is_empty)
617 		return;
618 
619 	qdf_err("Nbuf map without unmap events detected!");
620 	qdf_err("------------------------------------------------------------");
621 
	/* Hold the lock for the entire iteration for safe list/meta access. We
	 * are explicitly preferring the chance to watchdog on the print, over
	 * the possibility of invalid list/memory access. Since we are going to
	 * panic anyway, the worst case is loading up the crash dump to find out
	 * what was in the hash table.
	 */
628 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
629 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
630 		count++;
		qdf_err("0x%pK @ %s:%u",
			meta->nbuf, meta->file, meta->line);
633 	}
634 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
635 
636 	panic("%u fatal nbuf map without unmap events detected!", count);
637 }
638 
639 static void qdf_nbuf_map_tracking_deinit(void)
640 {
641 	qdf_nbuf_map_check_for_leaks();
642 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
643 	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
644 }
645 
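/* Note: callers must hold qdf_nbuf_map_lock around this lookup; the hash
 * bucket list is not otherwise protected.
 */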
646 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
647 {
648 	struct qdf_nbuf_map_metadata *meta;
649 
650 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
651 		if (meta->nbuf == nbuf)
652 			return meta;
653 	}
654 
655 	return NULL;
656 }
657 
658 static QDF_STATUS
659 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
660 {
661 	struct qdf_nbuf_map_metadata *meta;
662 
663 	QDF_BUG(nbuf);
664 	if (!nbuf) {
665 		qdf_err("Cannot map null nbuf");
666 		return QDF_STATUS_E_INVAL;
667 	}
668 
669 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
670 	meta = qdf_nbuf_meta_get(nbuf);
671 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
672 	if (meta)
673 		QDF_DEBUG_PANIC(
674 			"Double nbuf map detected @ %s:%u; last map from %s:%u",
675 			kbasename(file), line, meta->file, meta->line);
676 
677 	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
678 	if (!meta) {
679 		qdf_err("Failed to allocate nbuf map tracking metadata");
680 		return QDF_STATUS_E_NOMEM;
681 	}
682 
683 	meta->nbuf = nbuf;
684 	qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
685 	meta->line = line;
686 
687 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
688 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
689 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
690 
691 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
692 
693 	return QDF_STATUS_SUCCESS;
694 }
695 
696 static void
697 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
698 {
699 	struct qdf_nbuf_map_metadata *meta;
700 
701 	QDF_BUG(nbuf);
702 	if (!nbuf) {
703 		qdf_err("Cannot unmap null nbuf");
704 		return;
705 	}
706 
707 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
708 	meta = qdf_nbuf_meta_get(nbuf);
709 
710 	if (!meta)
711 		QDF_DEBUG_PANIC(
712 		      "Double nbuf unmap or unmap without map detected @ %s:%u",
713 		      kbasename(file), line);
714 
715 	hash_del(&meta->node);
716 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
717 
718 	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
719 
720 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
721 }
722 
723 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
724 			      qdf_nbuf_t buf,
725 			      qdf_dma_dir_t dir,
726 			      const char *file,
727 			      uint32_t line)
728 {
729 	QDF_STATUS status;
730 
731 	status = qdf_nbuf_track_map(buf, file, line);
732 	if (QDF_IS_STATUS_ERROR(status))
733 		return status;
734 
735 	status = __qdf_nbuf_map(osdev, buf, dir);
736 	if (QDF_IS_STATUS_ERROR(status))
737 		qdf_nbuf_untrack_map(buf, file, line);
738 
739 	return status;
740 }
741 
742 qdf_export_symbol(qdf_nbuf_map_debug);
743 
744 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
745 			  qdf_nbuf_t buf,
746 			  qdf_dma_dir_t dir,
747 			  const char *file,
748 			  uint32_t line)
749 {
750 	qdf_nbuf_untrack_map(buf, file, line);
751 	__qdf_nbuf_unmap_single(osdev, buf, dir);
752 }
753 
754 qdf_export_symbol(qdf_nbuf_unmap_debug);
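
/*
 * Usage sketch (hypothetical): under NBUF_MAP_UNMAP_DEBUG every map must be
 * balanced by exactly one unmap of the same nbuf, or the tracker panics on
 * a double map, a double unmap, or a free-while-mapped.
 */
#if 0
	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
				    __FILE__, __LINE__);
	if (QDF_IS_STATUS_ERROR(status))
		return status;
	/* ... perform DMA ... */
	qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
			     __FILE__, __LINE__);
#endif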
755 
756 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
757 				     qdf_nbuf_t buf,
758 				     qdf_dma_dir_t dir,
759 				     const char *file,
760 				     uint32_t line)
761 {
762 	QDF_STATUS status;
763 
764 	status = qdf_nbuf_track_map(buf, file, line);
765 	if (QDF_IS_STATUS_ERROR(status))
766 		return status;
767 
768 	status = __qdf_nbuf_map_single(osdev, buf, dir);
769 	if (QDF_IS_STATUS_ERROR(status))
770 		qdf_nbuf_untrack_map(buf, file, line);
771 
772 	return status;
773 }
774 
775 qdf_export_symbol(qdf_nbuf_map_single_debug);
776 
777 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
778 				 qdf_nbuf_t buf,
779 				 qdf_dma_dir_t dir,
780 				 const char *file,
781 				 uint32_t line)
782 {
783 	qdf_nbuf_untrack_map(buf, file, line);
784 	__qdf_nbuf_unmap_single(osdev, buf, dir);
785 }
786 
787 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
788 
789 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
790 				     qdf_nbuf_t buf,
791 				     qdf_dma_dir_t dir,
792 				     int nbytes,
793 				     const char *file,
794 				     uint32_t line)
795 {
796 	QDF_STATUS status;
797 
798 	status = qdf_nbuf_track_map(buf, file, line);
799 	if (QDF_IS_STATUS_ERROR(status))
800 		return status;
801 
802 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
803 	if (QDF_IS_STATUS_ERROR(status))
804 		qdf_nbuf_untrack_map(buf, file, line);
805 
806 	return status;
807 }
808 
809 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
810 
811 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
812 				 qdf_nbuf_t buf,
813 				 qdf_dma_dir_t dir,
814 				 int nbytes,
815 				 const char *file,
816 				 uint32_t line)
817 {
818 	qdf_nbuf_untrack_map(buf, file, line);
819 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
820 }
821 
822 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
823 
824 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
825 					    qdf_nbuf_t buf,
826 					    qdf_dma_dir_t dir,
827 					    int nbytes,
828 					    const char *file,
829 					    uint32_t line)
830 {
831 	QDF_STATUS status;
832 
833 	status = qdf_nbuf_track_map(buf, file, line);
834 	if (QDF_IS_STATUS_ERROR(status))
835 		return status;
836 
837 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
838 	if (QDF_IS_STATUS_ERROR(status))
839 		qdf_nbuf_untrack_map(buf, file, line);
840 
841 	return status;
842 }
843 
844 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
845 
846 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
847 					qdf_nbuf_t buf,
848 					qdf_dma_dir_t dir,
849 					int nbytes,
850 					const char *file,
851 					uint32_t line)
852 {
853 	qdf_nbuf_untrack_map(buf, file, line);
854 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
855 }
856 
857 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
858 
static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
					     const char *file,
					     uint32_t line)
861 {
862 	struct qdf_nbuf_map_metadata *meta;
863 
864 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
865 	meta = qdf_nbuf_meta_get(nbuf);
866 	if (meta)
867 		QDF_DEBUG_PANIC(
868 			"Nbuf freed @ %s:%u while mapped from %s:%u",
869 			kbasename(file), line, meta->file, meta->line);
870 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
871 }
872 #else
873 static inline void qdf_nbuf_map_tracking_init(void)
874 {
875 }
876 
877 static inline void qdf_nbuf_map_tracking_deinit(void)
878 {
879 }
880 
static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
						    const char *file,
						    uint32_t line)
884 {
885 }
886 #endif /* NBUF_MAP_UNMAP_DEBUG */
887 
/**
 * __qdf_nbuf_map() - map a buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
897 #ifdef QDF_OS_DEBUG
898 QDF_STATUS
899 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
900 {
901 	struct skb_shared_info *sh = skb_shinfo(skb);
902 
903 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
904 			|| (dir == QDF_DMA_FROM_DEVICE));
905 
906 	/*
907 	 * Assume there's only a single fragment.
908 	 * To support multiple fragments, it would be necessary to change
909 	 * qdf_nbuf_t to be a separate object that stores meta-info
910 	 * (including the bus address for each fragment) and a pointer
911 	 * to the underlying sk_buff.
912 	 */
913 	qdf_assert(sh->nr_frags == 0);
914 
915 	return __qdf_nbuf_map_single(osdev, skb, dir);
916 }
917 qdf_export_symbol(__qdf_nbuf_map);
918 
919 #else
920 QDF_STATUS
921 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
922 {
923 	return __qdf_nbuf_map_single(osdev, skb, dir);
924 }
925 qdf_export_symbol(__qdf_nbuf_map);
926 #endif
927 /**
928  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
929  * @osdev: OS device
930  * @skb: Pointer to network buffer
931  * @dir: dma direction
932  *
933  * Return: none
934  */
935 void
936 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
937 			qdf_dma_dir_t dir)
938 {
939 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
940 		   || (dir == QDF_DMA_FROM_DEVICE));
941 
942 	/*
943 	 * Assume there's a single fragment.
944 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
945 	 */
946 	__qdf_nbuf_unmap_single(osdev, skb, dir);
947 }
948 qdf_export_symbol(__qdf_nbuf_unmap);
949 
950 /**
951  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
952  * @osdev: OS device
953  * @skb: Pointer to network buffer
954  * @dir: Direction
955  *
956  * Return: QDF_STATUS
957  */
958 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
959 QDF_STATUS
960 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
961 {
962 	qdf_dma_addr_t paddr;
963 
964 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
965 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
966 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
967 	return QDF_STATUS_SUCCESS;
968 }
969 qdf_export_symbol(__qdf_nbuf_map_single);
970 #else
971 QDF_STATUS
972 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
973 {
974 	qdf_dma_addr_t paddr;
975 
976 	/* assume that the OS only provides a single fragment */
977 	QDF_NBUF_CB_PADDR(buf) = paddr =
978 		dma_map_single(osdev->dev, buf->data,
979 				skb_end_pointer(buf) - buf->data,
980 				__qdf_dma_dir_to_os(dir));
981 	return dma_mapping_error(osdev->dev, paddr)
982 		? QDF_STATUS_E_FAILURE
983 		: QDF_STATUS_SUCCESS;
984 }
985 qdf_export_symbol(__qdf_nbuf_map_single);
986 #endif
987 /**
988  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
989  * @osdev: OS device
990  * @skb: Pointer to network buffer
991  * @dir: Direction
992  *
993  * Return: none
994  */
995 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
996 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
997 				qdf_dma_dir_t dir)
998 {
999 }
1000 #else
1001 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1002 					qdf_dma_dir_t dir)
1003 {
1004 	if (QDF_NBUF_CB_PADDR(buf))
1005 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1006 			skb_end_pointer(buf) - buf->data,
1007 			__qdf_dma_dir_to_os(dir));
1008 }
1009 #endif
1010 qdf_export_symbol(__qdf_nbuf_unmap_single);
1011 
1012 /**
1013  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1014  * @skb: Pointer to network buffer
1015  * @cksum: Pointer to checksum value
1016  *
1017  * Return: QDF_STATUS
1018  */
1019 QDF_STATUS
1020 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1021 {
1022 	switch (cksum->l4_result) {
1023 	case QDF_NBUF_RX_CKSUM_NONE:
1024 		skb->ip_summed = CHECKSUM_NONE;
1025 		break;
1026 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1027 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1028 		break;
1029 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1030 		skb->ip_summed = CHECKSUM_PARTIAL;
1031 		skb->csum = cksum->val;
1032 		break;
1033 	default:
1034 		pr_err("Unknown checksum type\n");
1035 		qdf_assert(0);
1036 		return QDF_STATUS_E_NOSUPPORT;
1037 	}
1038 	return QDF_STATUS_SUCCESS;
1039 }
1040 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
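
/*
 * Usage sketch (hypothetical rx completion path): report a hardware-verified
 * L4 checksum so the stack skips software verification. Assumes the
 * qdf_nbuf_rx_cksum_t layout from qdf_nbuf.h.
 */
#if 0
	qdf_nbuf_rx_cksum_t cksum = {
		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
	};

	__qdf_nbuf_set_rx_cksum(skb, &cksum);
#endif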
1041 
1042 /**
1043  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1044  * @skb: Pointer to network buffer
1045  *
1046  * Return: TX checksum value
1047  */
1048 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1049 {
1050 	switch (skb->ip_summed) {
1051 	case CHECKSUM_NONE:
1052 		return QDF_NBUF_TX_CKSUM_NONE;
1053 	case CHECKSUM_PARTIAL:
1054 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1055 	case CHECKSUM_COMPLETE:
1056 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1057 	default:
1058 		return QDF_NBUF_TX_CKSUM_NONE;
1059 	}
1060 }
1061 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1062 
1063 /**
1064  * __qdf_nbuf_get_tid() - get tid
1065  * @skb: Pointer to network buffer
1066  *
1067  * Return: tid
1068  */
1069 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1070 {
1071 	return skb->priority;
1072 }
1073 qdf_export_symbol(__qdf_nbuf_get_tid);
1074 
/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to set
 *
 * Return: none
 */
1081 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1082 {
1083 	skb->priority = tid;
1084 }
1085 qdf_export_symbol(__qdf_nbuf_set_tid);
1086 
/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
1093 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1094 {
1095 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1096 }
1097 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1098 
1099 /**
1100  * __qdf_nbuf_reg_trace_cb() - register trace callback
1101  * @cb_func_ptr: Pointer to trace callback function
1102  *
1103  * Return: none
1104  */
1105 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1106 {
1107 	qdf_trace_update_cb = cb_func_ptr;
1108 }
1109 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1110 
1111 /**
1112  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1113  *              of DHCP packet.
1114  * @data: Pointer to DHCP packet data buffer
1115  *
1116  * This func. returns the subtype of DHCP packet.
1117  *
1118  * Return: subtype of the DHCP packet.
1119  */
1120 enum qdf_proto_subtype
1121 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1122 {
1123 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1124 
1125 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1126 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1127 					QDF_DHCP_OPTION53_LENGTH)) {
1128 
1129 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1130 		case QDF_DHCP_DISCOVER:
1131 			subtype = QDF_PROTO_DHCP_DISCOVER;
1132 			break;
1133 		case QDF_DHCP_REQUEST:
1134 			subtype = QDF_PROTO_DHCP_REQUEST;
1135 			break;
1136 		case QDF_DHCP_OFFER:
1137 			subtype = QDF_PROTO_DHCP_OFFER;
1138 			break;
1139 		case QDF_DHCP_ACK:
1140 			subtype = QDF_PROTO_DHCP_ACK;
1141 			break;
1142 		case QDF_DHCP_NAK:
1143 			subtype = QDF_PROTO_DHCP_NACK;
1144 			break;
1145 		case QDF_DHCP_RELEASE:
1146 			subtype = QDF_PROTO_DHCP_RELEASE;
1147 			break;
1148 		case QDF_DHCP_INFORM:
1149 			subtype = QDF_PROTO_DHCP_INFORM;
1150 			break;
1151 		case QDF_DHCP_DECLINE:
1152 			subtype = QDF_PROTO_DHCP_DECLINE;
1153 			break;
1154 		default:
1155 			break;
1156 		}
1157 	}
1158 
1159 	return subtype;
1160 }
1161 
1162 /**
1163  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1164  *            of EAPOL packet.
1165  * @data: Pointer to EAPOL packet data buffer
1166  *
1167  * This func. returns the subtype of EAPOL packet.
1168  *
1169  * Return: subtype of the EAPOL packet.
1170  */
1171 enum qdf_proto_subtype
1172 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1173 {
1174 	uint16_t eapol_key_info;
1175 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1176 	uint16_t mask;
1177 
1178 	eapol_key_info = (uint16_t)(*(uint16_t *)
1179 			(data + EAPOL_KEY_INFO_OFFSET));
1180 
1181 	mask = eapol_key_info & EAPOL_MASK;
1182 	switch (mask) {
1183 	case EAPOL_M1_BIT_MASK:
1184 		subtype = QDF_PROTO_EAPOL_M1;
1185 		break;
1186 	case EAPOL_M2_BIT_MASK:
1187 		subtype = QDF_PROTO_EAPOL_M2;
1188 		break;
1189 	case EAPOL_M3_BIT_MASK:
1190 		subtype = QDF_PROTO_EAPOL_M3;
1191 		break;
1192 	case EAPOL_M4_BIT_MASK:
1193 		subtype = QDF_PROTO_EAPOL_M4;
1194 		break;
1195 	default:
1196 		break;
1197 	}
1198 
1199 	return subtype;
1200 }
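
/*
 * Usage sketch (hypothetical): classify first, then fetch the handshake
 * message number, e.g. for connection logging.
 */
#if 0
	if (__qdf_nbuf_data_is_ipv4_eapol_pkt(data))
		subtype = __qdf_nbuf_data_get_eapol_subtype(data);
#endif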
1201 
1202 /**
1203  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1204  *            of ARP packet.
1205  * @data: Pointer to ARP packet data buffer
1206  *
1207  * This func. returns the subtype of ARP packet.
1208  *
1209  * Return: subtype of the ARP packet.
1210  */
1211 enum qdf_proto_subtype
1212 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1213 {
1214 	uint16_t subtype;
1215 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1216 
1217 	subtype = (uint16_t)(*(uint16_t *)
1218 			(data + ARP_SUB_TYPE_OFFSET));
1219 
1220 	switch (QDF_SWAP_U16(subtype)) {
1221 	case ARP_REQUEST:
1222 		proto_subtype = QDF_PROTO_ARP_REQ;
1223 		break;
1224 	case ARP_RESPONSE:
1225 		proto_subtype = QDF_PROTO_ARP_RES;
1226 		break;
1227 	default:
1228 		break;
1229 	}
1230 
1231 	return proto_subtype;
1232 }
1233 
1234 /**
1235  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1236  *            of IPV4 ICMP packet.
1237  * @data: Pointer to IPV4 ICMP packet data buffer
1238  *
1239  * This func. returns the subtype of ICMP packet.
1240  *
1241  * Return: subtype of the ICMP packet.
1242  */
1243 enum qdf_proto_subtype
1244 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1245 {
1246 	uint8_t subtype;
1247 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1248 
1249 	subtype = (uint8_t)(*(uint8_t *)
1250 			(data + ICMP_SUBTYPE_OFFSET));
1251 
1252 	switch (subtype) {
1253 	case ICMP_REQUEST:
1254 		proto_subtype = QDF_PROTO_ICMP_REQ;
1255 		break;
1256 	case ICMP_RESPONSE:
1257 		proto_subtype = QDF_PROTO_ICMP_RES;
1258 		break;
1259 	default:
1260 		break;
1261 	}
1262 
1263 	return proto_subtype;
1264 }
1265 
1266 /**
1267  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1268  *            of IPV6 ICMPV6 packet.
1269  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1270  *
1271  * This func. returns the subtype of ICMPV6 packet.
1272  *
1273  * Return: subtype of the ICMPV6 packet.
1274  */
1275 enum qdf_proto_subtype
1276 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1277 {
1278 	uint8_t subtype;
1279 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1280 
1281 	subtype = (uint8_t)(*(uint8_t *)
1282 			(data + ICMPV6_SUBTYPE_OFFSET));
1283 
1284 	switch (subtype) {
1285 	case ICMPV6_REQUEST:
1286 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1287 		break;
1288 	case ICMPV6_RESPONSE:
1289 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1290 		break;
1291 	case ICMPV6_RS:
1292 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1293 		break;
1294 	case ICMPV6_RA:
1295 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1296 		break;
1297 	case ICMPV6_NS:
1298 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1299 		break;
1300 	case ICMPV6_NA:
1301 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1302 		break;
1303 	default:
1304 		break;
1305 	}
1306 
1307 	return proto_subtype;
1308 }
1309 
1310 /**
1311  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1312  *            of IPV4 packet.
1313  * @data: Pointer to IPV4 packet data buffer
1314  *
1315  * This func. returns the proto type of IPV4 packet.
1316  *
1317  * Return: proto type of IPV4 packet.
1318  */
1319 uint8_t
1320 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1321 {
1322 	uint8_t proto_type;
1323 
1324 	proto_type = (uint8_t)(*(uint8_t *)(data +
1325 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1326 	return proto_type;
1327 }
1328 
1329 /**
1330  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1331  *            of IPV6 packet.
1332  * @data: Pointer to IPV6 packet data buffer
1333  *
1334  * This func. returns the proto type of IPV6 packet.
1335  *
1336  * Return: proto type of IPV6 packet.
1337  */
1338 uint8_t
1339 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1340 {
1341 	uint8_t proto_type;
1342 
1343 	proto_type = (uint8_t)(*(uint8_t *)(data +
1344 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1345 	return proto_type;
1346 }
1347 
1348 /**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1350  * @data: Pointer to network data
1351  *
1352  * This api is for Tx packets.
1353  *
1354  * Return: true if packet is ipv4 packet
1355  *	   false otherwise
1356  */
1357 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1358 {
1359 	uint16_t ether_type;
1360 
1361 	ether_type = (uint16_t)(*(uint16_t *)(data +
1362 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1363 
1364 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1365 		return true;
1366 	else
1367 		return false;
1368 }
1369 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
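
/* Note: QDF_SWAP_U16() is applied to the constant so the comparison works
 * against the big-endian (network order) field on a little-endian host;
 * the same pattern is used by all the ethertype/port checks below.
 */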
1370 
1371 /**
1372  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1373  * @data: Pointer to network data buffer
1374  *
1375  * This api is for ipv4 packet.
1376  *
1377  * Return: true if packet is DHCP packet
1378  *	   false otherwise
1379  */
1380 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1381 {
1382 	uint16_t sport;
1383 	uint16_t dport;
1384 
1385 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1386 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1387 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1388 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1389 					 sizeof(uint16_t)));
1390 
1391 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1392 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1393 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1394 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1395 		return true;
1396 	else
1397 		return false;
1398 }
1399 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
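
/* Note: the port lookup above uses the fixed QDF_NBUF_TRAC_IPV4_HEADER_SIZE,
 * i.e. it assumes an IPv4 header without options.
 */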
1400 
1401 /**
 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1403  * @data: Pointer to network data buffer
1404  *
1405  * This api is for ipv4 packet.
1406  *
1407  * Return: true if packet is EAPOL packet
1408  *	   false otherwise.
1409  */
1410 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1411 {
1412 	uint16_t ether_type;
1413 
1414 	ether_type = (uint16_t)(*(uint16_t *)(data +
1415 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1416 
1417 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1418 		return true;
1419 	else
1420 		return false;
1421 }
1422 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1423 
1424 /**
1425  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1426  * @skb: Pointer to network buffer
1427  *
1428  * This api is for ipv4 packet.
1429  *
1430  * Return: true if packet is WAPI packet
1431  *	   false otherwise.
1432  */
1433 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1434 {
1435 	uint16_t ether_type;
1436 
1437 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1438 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1439 
1440 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1441 		return true;
1442 	else
1443 		return false;
1444 }
1445 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1446 
1447 /**
1448  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1449  * @skb: Pointer to network buffer
1450  *
1451  * This api is for ipv4 packet.
1452  *
1453  * Return: true if packet is tdls packet
1454  *	   false otherwise.
1455  */
1456 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1457 {
1458 	uint16_t ether_type;
1459 
1460 	ether_type = *(uint16_t *)(skb->data +
1461 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1462 
1463 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1464 		return true;
1465 	else
1466 		return false;
1467 }
1468 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1469 
1470 /**
 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1472  * @data: Pointer to network data buffer
1473  *
1474  * This api is for ipv4 packet.
1475  *
1476  * Return: true if packet is ARP packet
1477  *	   false otherwise.
1478  */
1479 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1480 {
1481 	uint16_t ether_type;
1482 
1483 	ether_type = (uint16_t)(*(uint16_t *)(data +
1484 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1485 
1486 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1487 		return true;
1488 	else
1489 		return false;
1490 }
1491 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1492 
1493 /**
 * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1495  * @data: Pointer to network data buffer
1496  *
1497  * This api is for ipv4 packet.
1498  *
1499  * Return: true if packet is ARP request
1500  *	   false otherwise.
1501  */
1502 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1503 {
1504 	uint16_t op_code;
1505 
1506 	op_code = (uint16_t)(*(uint16_t *)(data +
1507 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1508 
1509 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1510 		return true;
1511 	return false;
1512 }
1513 
1514 /**
 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1516  * @data: Pointer to network data buffer
1517  *
1518  * This api is for ipv4 packet.
1519  *
1520  * Return: true if packet is ARP response
1521  *	   false otherwise.
1522  */
1523 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1524 {
1525 	uint16_t op_code;
1526 
1527 	op_code = (uint16_t)(*(uint16_t *)(data +
1528 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1529 
1530 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1531 		return true;
1532 	return false;
1533 }
1534 
1535 /**
 * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1537  * @data: Pointer to network data buffer
1538  *
1539  * This api is for ipv4 packet.
1540  *
1541  * Return: ARP packet source IP value.
1542  */
1543 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1544 {
1545 	uint32_t src_ip;
1546 
1547 	src_ip = (uint32_t)(*(uint32_t *)(data +
1548 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1549 
1550 	return src_ip;
1551 }
1552 
1553 /**
 * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1555  * @data: Pointer to network data buffer
1556  *
1557  * This api is for ipv4 packet.
1558  *
1559  * Return: ARP packet target IP value.
1560  */
1561 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1562 {
1563 	uint32_t tgt_ip;
1564 
1565 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1566 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1567 
1568 	return tgt_ip;
1569 }
1570 
1571 /**
1572  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1573  * @data: Pointer to network data buffer
1574  * @len: length to copy
1575  *
1576  * This api is for dns domain name
1577  *
1578  * Return: dns domain name.
1579  */
1580 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1581 {
1582 	uint8_t *domain_name;
1583 
1584 	domain_name = (uint8_t *)
1585 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1586 	return domain_name;
1587 }
1588 
1589 
1590 /**
1591  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1592  * @data: Pointer to network data buffer
1593  *
1594  * This api is for dns query packet.
1595  *
1596  * Return: true if packet is dns query packet.
1597  *	   false otherwise.
1598  */
1599 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1600 {
1601 	uint16_t op_code;
1602 	uint16_t tgt_port;
1603 
1604 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1605 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
	/* A standard DNS query always goes to destination port 53. */
1607 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1608 		op_code = (uint16_t)(*(uint16_t *)(data +
1609 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1610 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1611 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1612 			return true;
1613 	}
1614 	return false;
1615 }
1616 
1617 /**
1618  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1619  * @data: Pointer to network data buffer
1620  *
1621  * This api is for dns query response.
1622  *
1623  * Return: true if packet is dns response packet.
1624  *	   false otherwise.
1625  */
1626 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1627 {
1628 	uint16_t op_code;
1629 	uint16_t src_port;
1630 
1631 	src_port = (uint16_t)(*(uint16_t *)(data +
1632 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
	/* A standard DNS response always comes from source port 53. */
1634 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1635 		op_code = (uint16_t)(*(uint16_t *)(data +
1636 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1637 
1638 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1639 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1640 			return true;
1641 	}
1642 	return false;
1643 }
1644 
1645 /**
1646  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1647  * @data: Pointer to network data buffer
1648  *
1649  * This api is for tcp syn packet.
1650  *
1651  * Return: true if packet is tcp syn packet.
1652  *	   false otherwise.
1653  */
1654 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1655 {
1656 	uint8_t op_code;
1657 
1658 	op_code = (uint8_t)(*(uint8_t *)(data +
1659 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1660 
1661 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1662 		return true;
1663 	return false;
1664 }
1665 
1666 /**
1667  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1668  * @data: Pointer to network data buffer
1669  *
1670  * This api is for tcp syn ack packet.
1671  *
1672  * Return: true if packet is tcp syn ack packet.
1673  *	   false otherwise.
1674  */
1675 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1676 {
1677 	uint8_t op_code;
1678 
1679 	op_code = (uint8_t)(*(uint8_t *)(data +
1680 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1681 
1682 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1683 		return true;
1684 	return false;
1685 }
1686 
1687 /**
1688  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1689  * @data: Pointer to network data buffer
1690  *
1691  * This api is for tcp ack packet.
1692  *
1693  * Return: true if packet is tcp ack packet.
1694  *	   false otherwise.
1695  */
1696 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1697 {
1698 	uint8_t op_code;
1699 
1700 	op_code = (uint8_t)(*(uint8_t *)(data +
1701 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1702 
1703 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1704 		return true;
1705 	return false;
1706 }
1707 
1708 /**
1709  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1710  * @data: Pointer to network data buffer
1711  *
1712  * This api is for tcp packet.
1713  *
1714  * Return: tcp source port value.
1715  */
1716 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1717 {
1718 	uint16_t src_port;
1719 
1720 	src_port = (uint16_t)(*(uint16_t *)(data +
1721 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1722 
1723 	return src_port;
1724 }
1725 
1726 /**
1727  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1728  * @data: Pointer to network data buffer
1729  *
1730  * This api is for tcp packet.
1731  *
1732  * Return: tcp destination port value.
1733  */
1734 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1735 {
1736 	uint16_t tgt_port;
1737 
1738 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1739 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1740 
1741 	return tgt_port;
1742 }
1743 
1744 /**
 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
 * @data: Pointer to network data buffer
 *
 * This api is for icmpv4 request packets.
1749  *
1750  * Return: true if packet is icmpv4 request
1751  *	   false otherwise.
1752  */
1753 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1754 {
1755 	uint8_t op_code;
1756 
1757 	op_code = (uint8_t)(*(uint8_t *)(data +
1758 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1759 
1760 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1761 		return true;
1762 	return false;
1763 }
1764 
1765 /**
 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
 * @data: Pointer to network data buffer
 *
 * This api is for icmpv4 response packets.
1770  *
1771  * Return: true if packet is icmpv4 response
1772  *	   false otherwise.
1773  */
1774 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1775 {
1776 	uint8_t op_code;
1777 
1778 	op_code = (uint8_t)(*(uint8_t *)(data +
1779 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1780 
1781 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1782 		return true;
1783 	return false;
1784 }
1785 
1786 /**
 * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1788  * @data: Pointer to network data buffer
1789  *
1790  * This api is for ipv4 packet.
1791  *
1792  * Return: icmpv4 packet source IP value.
1793  */
1794 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1795 {
1796 	uint32_t src_ip;
1797 
1798 	src_ip = (uint32_t)(*(uint32_t *)(data +
1799 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1800 
1801 	return src_ip;
1802 }
1803 
1804 /**
 * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1806  * @data: Pointer to network data buffer
1807  *
1808  * This api is for ipv4 packet.
1809  *
1810  * Return: icmpv4 packet target IP value.
1811  */
1812 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1813 {
1814 	uint32_t tgt_ip;
1815 
1816 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1817 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1818 
1819 	return tgt_ip;
1820 }
1821 
1822 
1823 /**
 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 packet or not.
 *
 * Return: TRUE if it is an IPV6 packet
1830  *         FALSE if not
1831  */
1832 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1833 {
1834 	uint16_t ether_type;
1835 
1836 	ether_type = (uint16_t)(*(uint16_t *)(data +
1837 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1838 
1839 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1840 		return true;
1841 	else
1842 		return false;
1843 }
1844 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1845 
1846 /**
1847  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1848  * @data: Pointer to network data buffer
1849  *
1850  * This api is for ipv6 packet.
1851  *
1852  * Return: true if packet is DHCP packet
1853  *	   false otherwise
1854  */
1855 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1856 {
1857 	uint16_t sport;
1858 	uint16_t dport;
1859 
1860 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1861 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1862 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1863 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1864 					sizeof(uint16_t));
1865 
1866 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1867 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1868 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1869 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1870 		return true;
1871 	else
1872 		return false;
1873 }
1874 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1875 
1876 /**
 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. checks whether it is an IPV4 multicast packet or not.
 *
 * Return: TRUE if it is an IPV4 multicast packet
1883  *         FALSE if not
1884  */
1885 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1886 {
1887 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1888 		uint32_t *dst_addr =
1889 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1890 
		/*
		 * Check the top nibble of the IPv4 destination address;
		 * if it equals 0xE, the address is multicast.
		 */
1895 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1896 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1897 			return true;
1898 		else
1899 			return false;
1900 	} else
1901 		return false;
1902 }
1903 
1904 /**
 * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 multicast packet or not.
 *
 * Return: TRUE if it is an IPV6 multicast packet
1911  *         FALSE if not
1912  */
1913 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1914 {
1915 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1916 		uint16_t *dst_addr;
1917 
1918 		dst_addr = (uint16_t *)
1919 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1920 
		/*
		 * Check the first 16 bits of the IPv6 destination address;
		 * if they equal 0xFF00, it is an IPv6 mcast packet.
		 */
1925 		if (*dst_addr ==
1926 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1927 			return true;
1928 		else
1929 			return false;
1930 	} else
1931 		return false;
1932 }
1933 
1934 /**
 * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. checks whether it is an ICMP packet or not.
 *
 * Return: TRUE if it is an ICMP packet
1941  *         FALSE if not
1942  */
1943 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1944 {
1945 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1946 		uint8_t pkt_type;
1947 
1948 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1949 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1950 
1951 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1952 			return true;
1953 		else
1954 			return false;
1955 	} else
1956 		return false;
1957 }
1958 
1959 /**
 * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. checks whether it is an ICMPV6 packet or not.
 *
 * Return: TRUE if it is an ICMPV6 packet
1966  *         FALSE if not
1967  */
1968 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1969 {
1970 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1971 		uint8_t pkt_type;
1972 
1973 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1974 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1975 
1976 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1977 			return true;
1978 		else
1979 			return false;
1980 	} else
1981 		return false;
1982 }
1983 
1984 /**
 * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
 * @data: Pointer to IPV4 UDP packet data buffer
 *
 * This func. checks whether it is an IPV4 UDP packet or not.
 *
 * Return: TRUE if it is an IPV4 UDP packet
1991  *         FALSE if not
1992  */
1993 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1994 {
1995 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1996 		uint8_t pkt_type;
1997 
1998 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1999 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2000 
2001 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2002 			return true;
2003 		else
2004 			return false;
2005 	} else
2006 		return false;
2007 }
2008 
2009 /**
2010  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2011  * @data: Pointer to IPV4 TCP packet data buffer
2012  *
 * This function checks whether the packet is an IPV4 TCP packet.
 *
 * Return: TRUE if it is an IPV4 TCP packet
2016  *         FALSE if not
2017  */
2018 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2019 {
2020 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2021 		uint8_t pkt_type;
2022 
2023 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2024 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2025 
2026 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2027 			return true;
2028 		else
2029 			return false;
2030 	} else
2031 		return false;
2032 }
2033 
2034 /**
2035  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2036  * @data: Pointer to IPV6 UDP packet data buffer
2037  *
 * This function checks whether the packet is an IPV6 UDP packet.
 *
 * Return: TRUE if it is an IPV6 UDP packet
2041  *         FALSE if not
2042  */
2043 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2044 {
2045 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2046 		uint8_t pkt_type;
2047 
2048 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2049 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2050 
2051 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2052 			return true;
2053 		else
2054 			return false;
2055 	} else
2056 		return false;
2057 }
2058 
2059 /**
2060  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2061  * @data: Pointer to IPV6 TCP packet data buffer
2062  *
 * This function checks whether the packet is an IPV6 TCP packet.
 *
 * Return: TRUE if it is an IPV6 TCP packet
2066  *         FALSE if not
2067  */
2068 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2069 {
2070 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2071 		uint8_t pkt_type;
2072 
2073 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2074 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2075 
2076 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2077 			return true;
2078 		else
2079 			return false;
2080 	} else
2081 		return false;
2082 }
2083 
2084 /**
2085  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
 * @nbuf: sk buff
2087  *
2088  * Return: true if packet is broadcast
2089  *	   false otherwise
2090  */
2091 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2092 {
2093 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2094 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2095 }
2096 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2097 
2098 #ifdef NBUF_MEMORY_DEBUG
2099 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2100 
2101 /**
2102  * struct qdf_nbuf_track_t - Network buffer track structure
2103  *
2104  * @p_next: Pointer to next
2105  * @net_buf: Pointer to network buffer
2106  * @file_name: File name
2107  * @line_num: Line number
2108  * @size: Size
2109  */
2110 struct qdf_nbuf_track_t {
2111 	struct qdf_nbuf_track_t *p_next;
2112 	qdf_nbuf_t net_buf;
2113 	char file_name[QDF_MEM_FILE_NAME_SIZE];
2114 	uint32_t line_num;
2115 	size_t size;
2116 };
2117 
2118 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2119 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2120 
2121 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2122 static struct kmem_cache *nbuf_tracking_cache;
2123 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2124 static spinlock_t qdf_net_buf_track_free_list_lock;
2125 static uint32_t qdf_net_buf_track_free_list_count;
2126 static uint32_t qdf_net_buf_track_used_list_count;
2127 static uint32_t qdf_net_buf_track_max_used;
2128 static uint32_t qdf_net_buf_track_max_free;
2129 static uint32_t qdf_net_buf_track_max_allocated;
2130 
2131 /**
2132  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2133  *
 * Tracks the max number of network buffers that the wlan driver was tracking
 * at any one time, as well as the peak total number of tracking cookies
 * allocated (used plus free).
2136  *
2137  * Return: none
2138  */
2139 static inline void update_max_used(void)
2140 {
2141 	int sum;
2142 
2143 	if (qdf_net_buf_track_max_used <
2144 	    qdf_net_buf_track_used_list_count)
2145 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2146 	sum = qdf_net_buf_track_free_list_count +
2147 		qdf_net_buf_track_used_list_count;
2148 	if (qdf_net_buf_track_max_allocated < sum)
2149 		qdf_net_buf_track_max_allocated = sum;
2150 }
2151 
2152 /**
 * update_max_free() - update qdf_net_buf_track_max_free
 *
 * Tracks the max number of tracking buffers kept on the freelist.
2156  *
2157  * Return: none
2158  */
2159 static inline void update_max_free(void)
2160 {
2161 	if (qdf_net_buf_track_max_free <
2162 	    qdf_net_buf_track_free_list_count)
2163 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2164 }
2165 
2166 /**
2167  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2168  *
 * This function pulls from the freelist if possible, and otherwise uses
 * kmem_cache_alloc. It also adds flexibility to adjust the allocation and
 * freelist schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be
 * zeroed.
2174  */
2175 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2176 {
2177 	int flags = GFP_KERNEL;
2178 	unsigned long irq_flag;
2179 	QDF_NBUF_TRACK *new_node = NULL;
2180 
2181 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2182 	qdf_net_buf_track_used_list_count++;
2183 	if (qdf_net_buf_track_free_list != NULL) {
2184 		new_node = qdf_net_buf_track_free_list;
2185 		qdf_net_buf_track_free_list =
2186 			qdf_net_buf_track_free_list->p_next;
2187 		qdf_net_buf_track_free_list_count--;
2188 	}
2189 	update_max_used();
2190 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2191 
2192 	if (new_node != NULL)
2193 		return new_node;
2194 
2195 	if (in_interrupt() || irqs_disabled() || in_atomic())
2196 		flags = GFP_ATOMIC;
2197 
2198 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2199 }
2200 
2201 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2202 #define FREEQ_POOLSIZE 2048
2203 
2204 /**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: nbuf tracking cookie to free
 *
 * Matches calls to qdf_nbuf_track_alloc. Returns the tracking cookie either
 * to the kernel or to the internal freelist, based on the freelist size.
2210  *
2211  * Return: none
2212  */
2213 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2214 {
2215 	unsigned long irq_flag;
2216 
2217 	if (!node)
2218 		return;
2219 
	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE, but
	 * only if it is also bigger than twice the number of nbufs in use.
	 * If the driver is stalling in a consistently bursty fashion, this
	 * keeps 3/4 of the allocations on the freelist while still allowing
	 * the system to recover memory as traffic calms down. For example,
	 * with 2049 free and 1000 used cookies, the node below is returned
	 * to the kernel (2049 > 2048 and 2049 > 2000).
	 */
2227 
2228 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2229 
2230 	qdf_net_buf_track_used_list_count--;
2231 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2232 	   (qdf_net_buf_track_free_list_count >
2233 	    qdf_net_buf_track_used_list_count << 1)) {
2234 		kmem_cache_free(nbuf_tracking_cache, node);
2235 	} else {
2236 		node->p_next = qdf_net_buf_track_free_list;
2237 		qdf_net_buf_track_free_list = node;
2238 		qdf_net_buf_track_free_list_count++;
2239 	}
2240 	update_max_free();
2241 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2242 }
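
/*
 * A minimal sketch of how the debug code below drives this cookie pool
 * (illustration only; "my_skb" is a hypothetical tracked buffer): a cookie
 * is pulled from the freelist (or the kmem_cache on a miss), filled in, and
 * later handed back, at which point the shrink heuristic above decides
 * between the freelist and kmem_cache_free.
 *
 *	QDF_NBUF_TRACK *cookie = qdf_nbuf_track_alloc();
 *
 *	if (cookie) {
 *		cookie->net_buf = my_skb;
 *		cookie->line_num = __LINE__;
 *		cookie->size = 2048;
 *		// ... linked into the hash table while the nbuf lives ...
 *		qdf_nbuf_track_free(cookie);
 *	}
 */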
2243 
2244 /**
2245  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2246  *
2247  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2248  * the freelist first makes it performant for the first iperf udp burst
2249  * as well as steady state.
2250  *
2251  * Return: None
2252  */
2253 static void qdf_nbuf_track_prefill(void)
2254 {
2255 	int i;
2256 	QDF_NBUF_TRACK *node, *head;
2257 
2258 	/* prepopulate the freelist */
2259 	head = NULL;
2260 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2261 		node = qdf_nbuf_track_alloc();
2262 		if (node == NULL)
2263 			continue;
2264 		node->p_next = head;
2265 		head = node;
2266 	}
2267 	while (head) {
2268 		node = head->p_next;
2269 		qdf_nbuf_track_free(head);
2270 		head = node;
2271 	}
2272 
2273 	/* prefilled buffers should not count as used */
2274 	qdf_net_buf_track_max_used = 0;
2275 }
2276 
2277 /**
2278  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2279  *
2280  * This initializes the memory manager for the nbuf tracking cookies.  Because
2281  * these cookies are all the same size and only used in this feature, we can
2282  * use a kmem_cache to provide tracking as well as to speed up allocations.
2283  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2284  * features) a freelist is prepopulated here.
2285  *
2286  * Return: None
2287  */
2288 static void qdf_nbuf_track_memory_manager_create(void)
2289 {
2290 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2291 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2292 						sizeof(QDF_NBUF_TRACK),
2293 						0, 0, NULL);
2294 
2295 	qdf_nbuf_track_prefill();
2296 }
2297 
2298 /**
 * qdf_nbuf_track_memory_manager_destroy() - tear down the tracking cookie manager
2300  *
2301  * Empty the freelist and print out usage statistics when it is no longer
2302  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2303  * any nbuf tracking cookies were leaked.
2304  *
2305  * Return: None
2306  */
2307 static void qdf_nbuf_track_memory_manager_destroy(void)
2308 {
2309 	QDF_NBUF_TRACK *node, *tmp;
2310 	unsigned long irq_flag;
2311 
2312 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2313 	node = qdf_net_buf_track_free_list;
2314 
2315 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2316 		qdf_print("%s: unexpectedly large max_used count %d",
2317 			  __func__, qdf_net_buf_track_max_used);
2318 
2319 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2320 		qdf_print("%s: %d unused trackers were allocated",
2321 			  __func__,
2322 			  qdf_net_buf_track_max_allocated -
2323 			  qdf_net_buf_track_max_used);
2324 
2325 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2326 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2327 		qdf_print("%s: check freelist shrinking functionality",
2328 			  __func__);
2329 
2330 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2331 		  "%s: %d residual freelist size",
2332 		  __func__, qdf_net_buf_track_free_list_count);
2333 
2334 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2335 		  "%s: %d max freelist size observed",
2336 		  __func__, qdf_net_buf_track_max_free);
2337 
2338 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2339 		  "%s: %d max buffers used observed",
2340 		  __func__, qdf_net_buf_track_max_used);
2341 
2342 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2343 		  "%s: %d max buffers allocated observed",
2344 		  __func__, qdf_net_buf_track_max_allocated);
2345 
2346 	while (node) {
2347 		tmp = node;
2348 		node = node->p_next;
2349 		kmem_cache_free(nbuf_tracking_cache, tmp);
2350 		qdf_net_buf_track_free_list_count--;
2351 	}
2352 
2353 	if (qdf_net_buf_track_free_list_count != 0)
2354 		qdf_info("%d unfreed tracking memory lost in freelist",
2355 			 qdf_net_buf_track_free_list_count);
2356 
2357 	if (qdf_net_buf_track_used_list_count != 0)
2358 		qdf_info("%d unfreed tracking memory still in use",
2359 			 qdf_net_buf_track_used_list_count);
2360 
2361 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2362 	kmem_cache_destroy(nbuf_tracking_cache);
2363 	qdf_net_buf_track_free_list = NULL;
2364 }
2365 
2366 /**
2367  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2368  *
 * QDF network buffer debug feature tracks all SKBs allocated by the WLAN
 * driver in a hash table, and reports leaked SKBs when the driver is
 * unloaded. WLAN driver modules whose allocated SKBs are freed by the
 * network stack are supposed to call qdf_net_buf_debug_release_skb() so
 * that the SKB is not reported as a memory leak.
2374  *
2375  * Return: none
2376  */
2377 void qdf_net_buf_debug_init(void)
2378 {
2379 	uint32_t i;
2380 
2381 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2382 
2383 	qdf_nbuf_map_tracking_init();
2384 	qdf_nbuf_track_memory_manager_create();
2385 
2386 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2387 		gp_qdf_net_buf_track_tbl[i] = NULL;
2388 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2389 	}
2390 }
2391 qdf_export_symbol(qdf_net_buf_debug_init);
2392 
2393 /**
 * qdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Exit network buffer tracking debug functionality and log SKB memory leaks.
 * As part of exiting the functionality, free the leaked memory and
 * clean up the tracking buffers.
2399  *
2400  * Return: none
2401  */
2402 void qdf_net_buf_debug_exit(void)
2403 {
2404 	uint32_t i;
2405 	uint32_t count = 0;
2406 	unsigned long irq_flag;
2407 	QDF_NBUF_TRACK *p_node;
2408 	QDF_NBUF_TRACK *p_prev;
2409 
2410 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2411 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2412 		p_node = gp_qdf_net_buf_track_tbl[i];
2413 		while (p_node) {
2414 			p_prev = p_node;
2415 			p_node = p_node->p_next;
2416 			count++;
2417 			qdf_info("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK",
2418 				 p_prev->file_name, p_prev->line_num,
2419 				 p_prev->size, p_prev->net_buf);
2420 			qdf_nbuf_track_free(p_prev);
2421 		}
2422 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2423 	}
2424 
2425 	qdf_nbuf_track_memory_manager_destroy();
2426 	qdf_nbuf_map_tracking_deinit();
2427 
2428 #ifdef CONFIG_HALT_KMEMLEAK
2429 	if (count) {
2430 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2431 		QDF_BUG(0);
2432 	}
2433 #endif
2434 }
2435 qdf_export_symbol(qdf_net_buf_debug_exit);
2436 
2437 /**
 * qdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer to hash
 *
 * Return: hash table index for the given network buffer
2441  */
2442 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2443 {
2444 	uint32_t i;
2445 
2446 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2447 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2448 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2449 
2450 	return i;
2451 }
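
/*
 * Worked example of the hash above (illustrative only): for a pointer value
 * of 0x10000, (0x10000 >> 4) = 0x1000 and (0x10000 >> 14) = 0x4, so the sum
 * is 0x1004; masking with (QDF_NET_BUF_TRACK_MAX_SIZE - 1) = 0x3FF yields
 * bucket 4. Adding two shifted copies of the pointer spreads allocations
 * that differ only in their low bits across different buckets.
 */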
2452 
2453 /**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer to look up
 *
 * Return: pointer to the tracking node if the skb is found in the hash
 *	table, else %NULL
2458  */
2459 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2460 {
2461 	uint32_t i;
2462 	QDF_NBUF_TRACK *p_node;
2463 
2464 	i = qdf_net_buf_debug_hash(net_buf);
2465 	p_node = gp_qdf_net_buf_track_tbl[i];
2466 
2467 	while (p_node) {
2468 		if (p_node->net_buf == net_buf)
2469 			return p_node;
2470 		p_node = p_node->p_next;
2471 	}
2472 
2473 	return NULL;
2474 }
2475 
2476 /**
 * qdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer to track
 * @size: allocation size
 * @file_name: file name of the caller
 * @line_num: line number of the caller
 *
 * Return: none
2480  */
2481 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2482 				uint8_t *file_name, uint32_t line_num)
2483 {
2484 	uint32_t i;
2485 	unsigned long irq_flag;
2486 	QDF_NBUF_TRACK *p_node;
2487 	QDF_NBUF_TRACK *new_node;
2488 
2489 	new_node = qdf_nbuf_track_alloc();
2490 
2491 	i = qdf_net_buf_debug_hash(net_buf);
2492 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2493 
2494 	p_node = qdf_net_buf_debug_look_up(net_buf);
2495 
2496 	if (p_node) {
2497 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2498 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2499 			  net_buf, kbasename(file_name), line_num);
2500 		qdf_nbuf_track_free(new_node);
2501 	} else {
2502 		p_node = new_node;
2503 		if (p_node) {
2504 			p_node->net_buf = net_buf;
2505 			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2506 				      QDF_MEM_FILE_NAME_SIZE);
2507 			p_node->line_num = line_num;
2508 			p_node->size = size;
2509 			qdf_mem_skb_inc(size);
2510 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2511 			gp_qdf_net_buf_track_tbl[i] = p_node;
2512 		} else
2513 			qdf_print(
2514 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2515 				  kbasename(file_name), line_num, size);
2516 	}
2517 
2518 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2519 }
2520 qdf_export_symbol(qdf_net_buf_debug_add_node);
2521 
2522 /**
 * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer to stop tracking
 *
 * Return: none
2526  */
2527 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2528 {
2529 	uint32_t i;
2530 	QDF_NBUF_TRACK *p_head;
2531 	QDF_NBUF_TRACK *p_node = NULL;
2532 	unsigned long irq_flag;
2533 	QDF_NBUF_TRACK *p_prev;
2534 
2535 	i = qdf_net_buf_debug_hash(net_buf);
2536 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2537 
2538 	p_head = gp_qdf_net_buf_track_tbl[i];
2539 
2540 	/* Unallocated SKB */
2541 	if (!p_head)
2542 		goto done;
2543 
2544 	p_node = p_head;
2545 	/* Found at head of the table */
2546 	if (p_head->net_buf == net_buf) {
2547 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2548 		goto done;
2549 	}
2550 
2551 	/* Search in collision list */
2552 	while (p_node) {
2553 		p_prev = p_node;
2554 		p_node = p_node->p_next;
2555 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2556 			p_prev->p_next = p_node->p_next;
2557 			break;
2558 		}
2559 	}
2560 
2561 done:
2562 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2563 
2564 	if (p_node) {
2565 		qdf_mem_skb_dec(p_node->size);
2566 		qdf_nbuf_track_free(p_node);
2567 	} else {
2568 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2569 			  net_buf);
2570 		QDF_BUG(0);
2571 	}
2572 }
2573 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2574 
2575 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2576 			uint8_t *file_name, uint32_t line_num)
2577 {
2578 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2579 
2580 	while (ext_list) {
		/*
		 * Take care to add every segment if this is a jumbo packet
		 * chained via frag_list.
		 */
2585 		qdf_nbuf_t next;
2586 
2587 		next = qdf_nbuf_queue_next(ext_list);
2588 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2589 		ext_list = next;
2590 	}
2591 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2592 }
2593 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2594 
2595 /**
2596  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2597  * @net_buf: Network buf holding head segment (single)
2598  *
 * WLAN driver modules whose allocated SKBs are freed by the network stack
 * are supposed to call this API before returning the SKB to the network
 * stack, so that the SKB is not reported as a memory leak.
2602  *
2603  * Return: none
2604  */
2605 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2606 {
2607 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2608 
2609 	while (ext_list) {
		/*
		 * Take care to free every segment if this is a jumbo packet
		 * chained via frag_list.
		 */
2614 		qdf_nbuf_t next;
2615 
2616 		next = qdf_nbuf_queue_next(ext_list);
2617 
2618 		if (qdf_nbuf_is_tso(ext_list) &&
2619 			qdf_nbuf_get_users(ext_list) > 1) {
2620 			ext_list = next;
2621 			continue;
2622 		}
2623 
2624 		qdf_net_buf_debug_delete_node(ext_list);
2625 		ext_list = next;
2626 	}
2627 
2628 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2629 		return;
2630 
2631 	qdf_net_buf_debug_delete_node(net_buf);
2632 }
2633 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2634 
2635 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2636 				int reserve, int align, int prio,
2637 				uint8_t *file, uint32_t line)
2638 {
2639 	qdf_nbuf_t nbuf;
2640 
2641 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, file, line);
2642 
2643 	/* Store SKB in internal QDF tracking table */
2644 	if (qdf_likely(nbuf)) {
2645 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2646 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2647 	}
2648 
2649 	return nbuf;
2650 }
2651 qdf_export_symbol(qdf_nbuf_alloc_debug);
2652 
2653 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2654 {
2655 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2656 		goto free_buf;
2657 
2658 	/* Remove SKB from internal QDF tracking table */
2659 	if (qdf_likely(nbuf)) {
2660 		qdf_nbuf_panic_on_free_if_mapped(nbuf, file, line);
2661 		qdf_net_buf_debug_delete_node(nbuf);
2662 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2663 	}
2664 
2665 free_buf:
2666 	__qdf_nbuf_free(nbuf);
2667 }
2668 qdf_export_symbol(qdf_nbuf_free_debug);
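
/*
 * Usage sketch (hypothetical caller; assumes the qdf_nbuf_alloc/qdf_nbuf_free
 * wrappers in qdf_nbuf.h route to the *_debug variants above when
 * NBUF_MEMORY_DEBUG is defined): every allocation is recorded along with its
 * call site and every matching free removes the record, so whatever is left
 * in the table at qdf_net_buf_debug_exit() is reported as a leak.
 *
 *	qdf_nbuf_t buf;
 *
 *	buf = qdf_nbuf_alloc(osdev, 2048, 0, 4, 0); // records __FILE__/__LINE__
 *	if (buf)
 *		qdf_nbuf_free(buf);                 // removes the record
 */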
2669 
2670 #endif /* NBUF_MEMORY_DEBUG */
2671 
2672 #if defined(FEATURE_TSO)
2673 
2674 /**
2675  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2676  *
2677  * @ethproto: ethernet type of the msdu
2678  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2679  * @l2_len: L2 length for the msdu
2680  * @eit_hdr: pointer to EIT header
2681  * @eit_hdr_len: EIT header length for the msdu
2682  * @eit_hdr_dma_map_addr: dma addr for EIT header
2683  * @tcphdr: pointer to tcp header
2684  * @ipv4_csum_en: ipv4 checksum enable
2685  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2686  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2687  * @ip_id: IP id
2688  * @tcp_seq_num: TCP sequence number
2689  *
2690  * This structure holds the TSO common info that is common
2691  * across all the TCP segments of the jumbo packet.
2692  */
2693 struct qdf_tso_cmn_seg_info_t {
2694 	uint16_t ethproto;
2695 	uint16_t ip_tcp_hdr_len;
2696 	uint16_t l2_len;
2697 	uint8_t *eit_hdr;
2698 	uint32_t eit_hdr_len;
2699 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2700 	struct tcphdr *tcphdr;
2701 	uint16_t ipv4_csum_en;
2702 	uint16_t tcp_ipv4_csum_en;
2703 	uint16_t tcp_ipv6_csum_en;
2704 	uint16_t ip_id;
2705 	uint32_t tcp_seq_num;
2706 };
2707 
2708 /**
2709  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2710  * information
2711  * @osdev: qdf device handle
2712  * @skb: skb buffer
 * @tso_info: Parameters common to all segments
2714  *
2715  * Get the TSO information that is common across all the TCP
2716  * segments of the jumbo packet
2717  *
 * Return: 0 - success, 1 - failure
2719  */
2720 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2721 			struct sk_buff *skb,
2722 			struct qdf_tso_cmn_seg_info_t *tso_info)
2723 {
2724 	/* Get ethernet type and ethernet header length */
2725 	tso_info->ethproto = vlan_get_protocol(skb);
2726 
2727 	/* Determine whether this is an IPv4 or IPv6 packet */
2728 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2729 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2730 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2731 
2732 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2733 		tso_info->ipv4_csum_en = 1;
2734 		tso_info->tcp_ipv4_csum_en = 1;
2735 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2736 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2737 				ipv4_hdr->protocol);
2738 			return 1;
2739 		}
2740 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2741 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2742 		tso_info->tcp_ipv6_csum_en = 1;
2743 	} else {
2744 		qdf_err("TSO: ethertype 0x%x is not supported!",
2745 			tso_info->ethproto);
2746 		return 1;
2747 	}
2748 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2749 	tso_info->tcphdr = tcp_hdr(skb);
2750 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2751 	/* get pointer to the ethernet + IP + TCP header and their length */
2752 	tso_info->eit_hdr = skb->data;
2753 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2754 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2755 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2756 							tso_info->eit_hdr,
2757 							tso_info->eit_hdr_len,
2758 							DMA_TO_DEVICE);
2759 	if (unlikely(dma_mapping_error(osdev->dev,
2760 				       tso_info->eit_hdr_dma_map_addr))) {
2761 		qdf_err("DMA mapping error!");
2762 		qdf_assert(0);
2763 		return 1;
2764 	}
2765 
2766 	if (tso_info->ethproto == htons(ETH_P_IP)) {
		/* include IPv4 header length for IPv4 (total length) */
2768 		tso_info->ip_tcp_hdr_len =
2769 			tso_info->eit_hdr_len - tso_info->l2_len;
2770 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2771 		/* exclude IPv6 header length for IPv6 (payload length) */
2772 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2773 	}
2774 	/*
2775 	 * The length of the payload (application layer data) is added to
2776 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2777 	 * descriptor.
2778 	 */
2779 
2780 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2781 		tso_info->tcp_seq_num,
2782 		tso_info->eit_hdr_len,
2783 		tso_info->l2_len,
2784 		skb->len);
2785 	return 0;
2786 }
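
/*
 * Worked example (illustrative only): for an untagged IPv4 TCP packet with
 * no IP or TCP options, eit_hdr_len = 14 (ethernet) + 20 (IP) + 20 (TCP) =
 * 54 bytes, l2_len = 14, and ip_tcp_hdr_len = 54 - 14 = 40 (total length).
 * For the equivalent IPv6 packet, ip_tcp_hdr_len = tcp_hdrlen(skb) = 20,
 * since IPv6 reports a payload length rather than a total length.
 */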
2787 
2788 
2789 /**
2790  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2791  *
2792  * @curr_seg: Segment whose contents are initialized
 * @tso_cmn_info: Parameters common to all segments
2794  *
2795  * Return: None
2796  */
2797 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2798 				struct qdf_tso_seg_elem_t *curr_seg,
2799 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2800 {
2801 	/* Initialize the flags to 0 */
2802 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2803 
2804 	/*
2805 	 * The following fields remain the same across all segments of
2806 	 * a jumbo packet
2807 	 */
2808 	curr_seg->seg.tso_flags.tso_enable = 1;
2809 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2810 		tso_cmn_info->ipv4_csum_en;
2811 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2812 		tso_cmn_info->tcp_ipv6_csum_en;
2813 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2814 		tso_cmn_info->tcp_ipv4_csum_en;
2815 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2816 
2817 	/* The following fields change for the segments */
2818 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2819 	tso_cmn_info->ip_id++;
2820 
2821 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2822 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2823 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2824 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2825 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2826 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2827 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2828 
2829 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2830 
2831 	/*
2832 	 * First fragment for each segment always contains the ethernet,
2833 	 * IP and TCP header
2834 	 */
2835 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2836 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2837 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2838 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2839 
2840 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2841 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2842 		   tso_cmn_info->eit_hdr_len,
2843 		   curr_seg->seg.tso_flags.tcp_seq_num,
2844 		   curr_seg->seg.total_len);
2845 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2846 }
2847 
2848 /**
2849  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2850  * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
2852  * @tso_info: This is the output. The information about the
2853  *           TSO segments will be populated within this.
2854  *
2855  * This function fragments a TCP jumbo packet into smaller
2856  * segments to be transmitted by the driver. It chains the TSO
2857  * segments created into a list.
2858  *
2859  * Return: number of TSO segments
2860  */
2861 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2862 		struct qdf_tso_info_t *tso_info)
2863 {
2864 	/* common across all segments */
2865 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2866 	/* segment specific */
2867 	void *tso_frag_vaddr;
2868 	qdf_dma_addr_t tso_frag_paddr = 0;
2869 	uint32_t num_seg = 0;
2870 	struct qdf_tso_seg_elem_t *curr_seg;
2871 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2872 	struct skb_frag_struct *frag = NULL;
2873 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2875 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2876 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2877 	int j = 0; /* skb fragment index */
2878 
2879 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2880 
2881 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2882 						skb, &tso_cmn_info))) {
2883 		qdf_warn("TSO: error getting common segment info");
2884 		return 0;
2885 	}
2886 
2887 	total_num_seg = tso_info->tso_num_seg_list;
2888 	curr_seg = tso_info->tso_seg_list;
2889 
2890 	/* length of the first chunk of data in the skb */
2891 	skb_frag_len = skb_headlen(skb);
2892 
2893 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2894 	/* update the remaining skb fragment length and TSO segment length */
2895 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2896 	skb_proc -= tso_cmn_info.eit_hdr_len;
2897 
2898 	/* get the address to the next tso fragment */
2899 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2900 	/* get the length of the next tso fragment */
2901 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2902 
2903 	if (tso_frag_len != 0) {
2904 		tso_frag_paddr = dma_map_single(osdev->dev,
2905 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2906 	}
2907 
2908 	if (unlikely(dma_mapping_error(osdev->dev,
2909 					tso_frag_paddr))) {
2910 		qdf_err("DMA mapping error!");
2911 		qdf_assert(0);
2912 		return 0;
2913 	}
2914 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2915 		__LINE__, skb_frag_len, tso_frag_len);
2916 	num_seg = tso_info->num_segs;
2917 	tso_info->num_segs = 0;
2918 	tso_info->is_tso = 1;
2919 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2920 
2921 	while (num_seg && curr_seg) {
2922 		int i = 1; /* tso fragment index */
2923 		uint8_t more_tso_frags = 1;
2924 
2925 		curr_seg->seg.num_frags = 0;
2926 		tso_info->num_segs++;
2927 		total_num_seg->num_seg.tso_cmn_num_seg++;
2928 
2929 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2930 						 &tso_cmn_info);
2931 
2932 		if (unlikely(skb_proc == 0))
2933 			return tso_info->num_segs;
2934 
2935 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2936 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2937 		/* frag len is added to ip_len in while loop below*/
2938 
2939 		curr_seg->seg.num_frags++;
2940 
2941 		while (more_tso_frags) {
2942 			if (tso_frag_len != 0) {
2943 				curr_seg->seg.tso_frags[i].vaddr =
2944 					tso_frag_vaddr;
2945 				curr_seg->seg.tso_frags[i].length =
2946 					tso_frag_len;
2947 				curr_seg->seg.total_len += tso_frag_len;
2948 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2949 				curr_seg->seg.num_frags++;
2950 				skb_proc = skb_proc - tso_frag_len;
2951 
2952 				/* increment the TCP sequence number */
2953 
2954 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2955 				curr_seg->seg.tso_frags[i].paddr =
2956 					tso_frag_paddr;
2957 			}
2958 
2959 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2960 					__func__, __LINE__,
2961 					i,
2962 					tso_frag_len,
2963 					curr_seg->seg.total_len,
2964 					curr_seg->seg.tso_frags[i].vaddr);
2965 
2966 			/* if there is no more data left in the skb */
2967 			if (!skb_proc)
2968 				return tso_info->num_segs;
2969 
2970 			/* get the next payload fragment information */
2971 			/* check if there are more fragments in this segment */
2972 			if (tso_frag_len < tso_seg_size) {
2973 				more_tso_frags = 1;
2974 				if (tso_frag_len != 0) {
2975 					tso_seg_size = tso_seg_size -
2976 						tso_frag_len;
2977 					i++;
2978 					if (curr_seg->seg.num_frags ==
2979 								FRAG_NUM_MAX) {
2980 						more_tso_frags = 0;
2981 						/*
2982 						 * reset i and the tso
2983 						 * payload size
2984 						 */
2985 						i = 1;
2986 						tso_seg_size =
2987 							skb_shinfo(skb)->
2988 								gso_size;
2989 					}
2990 				}
2991 			} else {
2992 				more_tso_frags = 0;
2993 				/* reset i and the tso payload size */
2994 				i = 1;
2995 				tso_seg_size = skb_shinfo(skb)->gso_size;
2996 			}
2997 
2998 			/* if the next fragment is contiguous */
2999 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3000 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3001 				skb_frag_len = skb_frag_len - tso_frag_len;
3002 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3003 
3004 			} else { /* the next fragment is not contiguous */
3005 				if (skb_shinfo(skb)->nr_frags == 0) {
3006 					qdf_info("TSO: nr_frags == 0!");
3007 					qdf_assert(0);
3008 					return 0;
3009 				}
3010 				if (j >= skb_shinfo(skb)->nr_frags) {
3011 					qdf_info("TSO: nr_frags %d j %d",
3012 						 skb_shinfo(skb)->nr_frags, j);
3013 					qdf_assert(0);
3014 					return 0;
3015 				}
3016 				frag = &skb_shinfo(skb)->frags[j];
3017 				skb_frag_len = skb_frag_size(frag);
3018 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3019 				tso_frag_vaddr = skb_frag_address_safe(frag);
3020 				j++;
3021 			}
3022 
3023 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3024 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3025 				tso_seg_size);
3026 
3027 			if (!(tso_frag_vaddr)) {
3028 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3029 						__func__);
3030 				return 0;
3031 			}
3032 
3033 			tso_frag_paddr =
3034 					 dma_map_single(osdev->dev,
3035 						 tso_frag_vaddr,
3036 						 tso_frag_len,
3037 						 DMA_TO_DEVICE);
3038 			if (unlikely(dma_mapping_error(osdev->dev,
3039 							tso_frag_paddr))) {
3040 				qdf_err("DMA mapping error!");
3041 				qdf_assert(0);
3042 				return 0;
3043 			}
3044 		}
3045 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3046 				curr_seg->seg.tso_flags.tcp_seq_num);
3047 		num_seg--;
3048 		/* if TCP FIN flag was set, set it in the last segment */
3049 		if (!num_seg)
3050 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3051 
3052 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3053 		curr_seg = curr_seg->next;
3054 	}
3055 	return tso_info->num_segs;
3056 }
3057 qdf_export_symbol(__qdf_nbuf_get_tso_info);
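
/*
 * A minimal caller sketch (hypothetical, for illustration only): the caller
 * sizes the segment chain via __qdf_nbuf_get_tso_num_seg(), links that many
 * qdf_tso_seg_elem_t entries into tso_info, and lets this function fill them
 * in. alloc_seg_chain() and alloc_num_seg_elem() are hypothetical helpers.
 *
 *	struct qdf_tso_info_t info = { 0 };
 *	uint32_t n = __qdf_nbuf_get_tso_num_seg(skb);
 *
 *	info.num_segs = n;
 *	info.tso_seg_list = alloc_seg_chain(n);
 *	info.tso_num_seg_list = alloc_num_seg_elem();
 *	if (__qdf_nbuf_get_tso_info(osdev, skb, &info) == 0)
 *		; // error: no segments could be prepared
 */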
3058 
3059 /**
3060  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3061  *
3062  * @osdev: qdf device handle
3063  * @tso_seg: TSO segment element to be unmapped
3064  * @is_last_seg: whether this is last tso seg or not
3065  *
3066  * Return: none
3067  */
3068 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3069 			  struct qdf_tso_seg_elem_t *tso_seg,
3070 			  bool is_last_seg)
3071 {
3072 	uint32_t num_frags = 0;
3073 
3074 	if (tso_seg->seg.num_frags > 0)
3075 		num_frags = tso_seg->seg.num_frags - 1;
3076 
	/* Num of frags in a tso seg cannot be less than 2 */
3078 	if (num_frags < 1) {
3079 		qdf_assert(0);
3080 		qdf_err("ERROR: num of frags in a tso segment is %d",
3081 			(num_frags + 1));
3082 		return;
3083 	}
3084 
3085 	while (num_frags) {
		/* Do dma unmap on the tso seg except the 0th frag */
3087 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3088 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3089 				num_frags);
3090 			qdf_assert(0);
3091 			return;
3092 		}
3093 		dma_unmap_single(osdev->dev,
3094 				 tso_seg->seg.tso_frags[num_frags].paddr,
3095 				 tso_seg->seg.tso_frags[num_frags].length,
3096 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3097 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3098 		num_frags--;
3099 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3100 	}
3101 
3102 	if (is_last_seg) {
		/* Do dma unmap for the tso seg 0th frag */
3104 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3105 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3106 			qdf_assert(0);
3107 			return;
3108 		}
3109 		dma_unmap_single(osdev->dev,
3110 				 tso_seg->seg.tso_frags[0].paddr,
3111 				 tso_seg->seg.tso_frags[0].length,
3112 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3113 		tso_seg->seg.tso_frags[0].paddr = 0;
3114 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3115 	}
3116 }
3117 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3118 
3119 /**
 * __qdf_nbuf_get_tso_num_seg() - calculate the number of TSO segments
 * @skb: network buffer for which to count TSO segments
 *
 * This function counts how many TSO segments a TCP jumbo packet will be
 * fragmented into, without actually performing the segmentation.
 *
 * Return: number of TSO segments, 0 on failure
3131  */
3132 #ifndef BUILD_X86
3133 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3134 {
3135 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3136 	uint32_t remainder, num_segs = 0;
3137 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3138 	uint8_t frags_per_tso = 0;
3139 	uint32_t skb_frag_len = 0;
3140 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3141 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3142 	struct skb_frag_struct *frag = NULL;
3143 	int j = 0;
3144 	uint32_t temp_num_seg = 0;
3145 
	/* length of the first chunk of data in the skb minus the eit header */
3147 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3148 
3149 	/* Calculate num of segs for skb's first chunk of data*/
3150 	remainder = skb_frag_len % tso_seg_size;
3151 	num_segs = skb_frag_len / tso_seg_size;
	/*
	 * Remainder non-zero and nr_frags zero implies end of skb data.
	 * In that case, one more tso seg is required to accommodate
	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then remaining data will be accommodated while doing the
	 * calculation for nr_frags data. Hence, frags_per_tso++.
3158 	 */
3159 	if (remainder) {
3160 		if (!skb_nr_frags)
3161 			num_segs++;
3162 		else
3163 			frags_per_tso++;
3164 	}
3165 
3166 	while (skb_nr_frags) {
3167 		if (j >= skb_shinfo(skb)->nr_frags) {
3168 			qdf_info("TSO: nr_frags %d j %d",
3169 				 skb_shinfo(skb)->nr_frags, j);
3170 			qdf_assert(0);
3171 			return 0;
3172 		}
		/*
		 * Calculate the number of tso segs for the nr_frags data:
		 * get the length of each frag into skb_frag_len, add it to
		 * the remainder, get the number of segments by dividing by
		 * tso_seg_size, and calculate the new remainder. Decrement
		 * the nr_frags value and keep looping over all the skb
		 * fragments.
		 */
3181 		frag = &skb_shinfo(skb)->frags[j];
3182 		skb_frag_len = skb_frag_size(frag);
3183 		temp_num_seg = num_segs;
3184 		remainder += skb_frag_len;
3185 		num_segs += remainder / tso_seg_size;
3186 		remainder = remainder % tso_seg_size;
3187 		skb_nr_frags--;
3188 		if (remainder) {
3189 			if (num_segs > temp_num_seg)
3190 				frags_per_tso = 0;
			/*
			 * Increment frags_per_tso whenever the remainder is
			 * positive. If frags_per_tso reaches (max - 1)
			 * [the first frag always holds the EIT header,
			 * therefore max - 1], increment num_segs as no more
			 * data can be accommodated in the current tso seg.
			 * Reset the remainder and frags_per_tso and keep
			 * looping.
			 */
3199 			frags_per_tso++;
3200 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3201 				num_segs++;
3202 				frags_per_tso = 0;
3203 				remainder = 0;
3204 			}
			/*
			 * If this is the last skb frag and the remainder is
			 * still non-zero (frags_per_tso has not reached
			 * max - 1), then increment num_segs to take care of
			 * the remaining length.
			 */
3211 			if (!skb_nr_frags && remainder) {
3212 				num_segs++;
3213 				frags_per_tso = 0;
3214 			}
3215 		} else {
3216 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3217 			frags_per_tso = 0;
3218 		}
3219 		j++;
3220 	}
3221 
3222 	return num_segs;
3223 }
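
/*
 * Worked example (illustrative only): with gso_size = 1400, a linear area
 * of 2854 bytes and an EIT header of 54 bytes, the first chunk contributes
 * 2800 / 1400 = 2 full segments with remainder 0. Had the linear area been
 * 3054 bytes (remainder 200) on an skb with no page frags, one extra segment
 * would be added for the 200-byte tail, giving 3 segments in total.
 */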
3224 #else
3225 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3226 {
3227 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3228 	struct skb_frag_struct *frag = NULL;
3229 
3230 	/*
3231 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3232 	 * region which cannot be accessed by Target
3233 	 */
3234 	if (virt_to_phys(skb->data) < 0x50000040) {
3235 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3236 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3237 				virt_to_phys(skb->data));
3238 		goto fail;
3239 
3240 	}
3241 
3242 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3243 		frag = &skb_shinfo(skb)->frags[i];
3244 
3245 		if (!frag)
3246 			goto fail;
3247 
3248 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3249 			goto fail;
3250 	}
3251 
3252 
3253 	gso_size = skb_shinfo(skb)->gso_size;
3254 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3255 			+ tcp_hdrlen(skb));
3256 	while (tmp_len) {
3257 		num_segs++;
3258 		if (tmp_len > gso_size)
3259 			tmp_len -= gso_size;
3260 		else
3261 			break;
3262 	}
3263 
3264 	return num_segs;
3265 
3266 	/*
3267 	 * Do not free this frame, just do socket level accounting
3268 	 * so that this is not reused.
3269 	 */
3270 fail:
3271 	if (skb->sk)
3272 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3273 
3274 	return 0;
3275 }
3276 #endif
3277 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3278 
3279 #endif /* FEATURE_TSO */
3280 
3281 /**
 * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address to split
 * @lo: output for the low 32 bits
 * @hi: output for the high 32 bits
 *
 * Returns the high and low 32 bits of the DMA addr in the provided pointers
3285  *
3286  * Return: N/A
3287  */
3288 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3289 			  uint32_t *lo, uint32_t *hi)
3290 {
3291 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3292 		*lo = lower_32_bits(dmaaddr);
3293 		*hi = upper_32_bits(dmaaddr);
3294 	} else {
3295 		*lo = dmaaddr;
3296 		*hi = 0;
3297 	}
3298 }
3299 
3300 qdf_export_symbol(__qdf_dmaaddr_to_32s);
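
/*
 * Worked example (illustrative only): with a 64-bit qdf_dma_addr_t,
 * dmaaddr = 0x0000000123456780 splits into hi = 0x00000001 and
 * lo = 0x23456780; with a 32-bit qdf_dma_addr_t the else-branch above
 * returns hi = 0 and lo equal to the address itself.
 */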
3301 
3302 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3303 {
3304 	qdf_nbuf_users_inc(&skb->users);
3305 	return skb;
3306 }
3307 qdf_export_symbol(__qdf_nbuf_inc_users);
3308 
3309 int __qdf_nbuf_get_users(struct sk_buff *skb)
3310 {
3311 	return qdf_nbuf_users_read(&skb->users);
3312 }
3313 qdf_export_symbol(__qdf_nbuf_get_users);
3314 
3315 /**
3316  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3317  * @skb: sk_buff handle
3318  *
3319  * Return: none
 */
3322 void __qdf_nbuf_ref(struct sk_buff *skb)
3323 {
3324 	skb_get(skb);
3325 }
3326 qdf_export_symbol(__qdf_nbuf_ref);
3327 
3328 /**
3329  * __qdf_nbuf_shared() - Check whether the buffer is shared
 * @skb: sk_buff buffer
 *
 * Return: true if more than one person has a reference to this buffer.
3333  */
3334 int __qdf_nbuf_shared(struct sk_buff *skb)
3335 {
3336 	return skb_shared(skb);
3337 }
3338 qdf_export_symbol(__qdf_nbuf_shared);
3339 
3340 /**
3341  * __qdf_nbuf_dmamap_create() - create a DMA map.
3342  * @osdev: qdf device handle
3343  * @dmap: dma map handle
3344  *
 * This can later be used to map networking buffers. They:
 * - need space in adf_drv's software descriptor
 * - are typically created during adf_drv_create
 * - need to be created before any API (qdf_nbuf_map) that uses them
3349  *
3350  * Return: QDF STATUS
3351  */
3352 QDF_STATUS
3353 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3354 {
3355 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3356 	/*
	 * If the driver can tell us its SG capability, it must be handled
	 * here. Bounce buffers if they are there.
3359 	 */
3360 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3361 	if (!(*dmap))
3362 		error = QDF_STATUS_E_NOMEM;
3363 
3364 	return error;
3365 }
3366 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3367 /**
3368  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3369  * @osdev: qdf device handle
3370  * @dmap: dma map handle
3371  *
3372  * Return: none
3373  */
3374 void
3375 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3376 {
3377 	kfree(dmap);
3378 }
3379 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3380 
3381 /**
3382  * __qdf_nbuf_map_nbytes_single() - map nbytes
3383  * @osdev: os device
3384  * @buf: buffer
3385  * @dir: direction
3386  * @nbytes: number of bytes
3387  *
3388  * Return: QDF_STATUS
3389  */
3390 #ifdef A_SIMOS_DEVHOST
3391 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3392 		qdf_device_t osdev, struct sk_buff *buf,
3393 		 qdf_dma_dir_t dir, int nbytes)
3394 {
3395 	qdf_dma_addr_t paddr;
3396 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
3398 	return QDF_STATUS_SUCCESS;
3399 }
3400 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3401 #else
3402 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3403 		qdf_device_t osdev, struct sk_buff *buf,
3404 		 qdf_dma_dir_t dir, int nbytes)
3405 {
3406 	qdf_dma_addr_t paddr;
3407 
3408 	/* assume that the OS only provides a single fragment */
3409 	QDF_NBUF_CB_PADDR(buf) = paddr =
3410 		dma_map_single(osdev->dev, buf->data,
3411 			nbytes, __qdf_dma_dir_to_os(dir));
3412 	return dma_mapping_error(osdev->dev, paddr) ?
3413 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3414 }
3415 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3416 #endif
3417 /**
3418  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3419  * @osdev: os device
3420  * @buf: buffer
3421  * @dir: direction
3422  * @nbytes: number of bytes
3423  *
3424  * Return: none
3425  */
3426 #if defined(A_SIMOS_DEVHOST)
3427 void
3428 __qdf_nbuf_unmap_nbytes_single(
3429 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3430 {
3431 }
3432 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3433 
3434 #else
3435 void
3436 __qdf_nbuf_unmap_nbytes_single(
3437 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3438 {
3439 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3440 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3441 		return;
3442 	}
3443 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3444 			nbytes, __qdf_dma_dir_to_os(dir));
3445 }
3446 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3447 #endif
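
/*
 * Pairing sketch (hypothetical caller, for illustration only): a map of the
 * first nbytes of the linear buffer must be matched by an unmap of the same
 * length and direction before the nbuf is freed; the bus address is parked
 * in the nbuf control block in between.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE, 256) ==
 *	    QDF_STATUS_SUCCESS) {
 *		qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(skb);
 *
 *		// ... hand paddr to the hardware ...
 *		__qdf_nbuf_unmap_nbytes_single(osdev, skb,
 *					       QDF_DMA_TO_DEVICE, 256);
 *	}
 */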
3448 /**
3449  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3450  * @osdev: os device
3451  * @skb: skb handle
3452  * @dir: dma direction
3453  * @nbytes: number of bytes to be mapped
3454  *
3455  * Return: QDF_STATUS
3456  */
3457 #ifdef QDF_OS_DEBUG
3458 QDF_STATUS
3459 __qdf_nbuf_map_nbytes(
3460 	qdf_device_t osdev,
3461 	struct sk_buff *skb,
3462 	qdf_dma_dir_t dir,
3463 	int nbytes)
3464 {
3465 	struct skb_shared_info  *sh = skb_shinfo(skb);
3466 
3467 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3468 
3469 	/*
3470 	 * Assume there's only a single fragment.
3471 	 * To support multiple fragments, it would be necessary to change
3472 	 * adf_nbuf_t to be a separate object that stores meta-info
3473 	 * (including the bus address for each fragment) and a pointer
3474 	 * to the underlying sk_buff.
3475 	 */
3476 	qdf_assert(sh->nr_frags == 0);
3477 
3478 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3479 }
3480 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3481 #else
3482 QDF_STATUS
3483 __qdf_nbuf_map_nbytes(
3484 	qdf_device_t osdev,
3485 	struct sk_buff *skb,
3486 	qdf_dma_dir_t dir,
3487 	int nbytes)
3488 {
3489 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3490 }
3491 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3492 #endif
3493 /**
3494  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3495  * @osdev: OS device
3496  * @skb: skb handle
3497  * @dir: direction
3498  * @nbytes: number of bytes
3499  *
3500  * Return: none
3501  */
3502 void
3503 __qdf_nbuf_unmap_nbytes(
3504 	qdf_device_t osdev,
3505 	struct sk_buff *skb,
3506 	qdf_dma_dir_t dir,
3507 	int nbytes)
3508 {
3509 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3510 
3511 	/*
3512 	 * Assume there's a single fragment.
3513 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3514 	 */
3515 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3516 }
3517 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3518 
3519 /**
3520  * __qdf_nbuf_dma_map_info() - return the dma map info
3521  * @bmap: dma map
3522  * @sg: dma map info
3523  *
3524  * Return: none
3525  */
3526 void
3527 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3528 {
3529 	qdf_assert(bmap->mapped);
3530 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3531 
3532 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3533 			sizeof(struct __qdf_segment));
3534 	sg->nsegs = bmap->nsegs;
3535 }
3536 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3537 /**
3538  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3539  *			specified by the index
3540  * @skb: sk buff
3541  * @sg: scatter/gather list of all the frags
3542  *
3543  * Return: none
3544  */
3545 #if defined(__QDF_SUPPORT_FRAG_MEM)
void
__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	qdf_assert(skb != NULL);
	sg->sg_segs[0].vaddr = skb->data;
	sg->sg_segs[0].len   = skb->len;
	sg->nsegs            = 1;

	/* append each page fragment after the linear segment */
	for (i = 1; i <= sh->nr_frags; i++) {
		skb_frag_t *f = &sh->frags[i - 1];

		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
						   f->page_offset);
		sg->sg_segs[i].len   = f->size;

		qdf_assert(i < QDF_MAX_SGLIST);
	}
	sg->nsegs += sh->nr_frags;
}
3566 qdf_export_symbol(__qdf_nbuf_frag_info);
3567 #else
3568 #ifdef QDF_OS_DEBUG
3569 void
3570 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3571 {
3572 
3573 	struct skb_shared_info  *sh = skb_shinfo(skb);
3574 
3575 	qdf_assert(skb != NULL);
3576 	sg->sg_segs[0].vaddr = skb->data;
3577 	sg->sg_segs[0].len   = skb->len;
3578 	sg->nsegs            = 1;
3579 
3580 	qdf_assert(sh->nr_frags == 0);
3581 }
3582 qdf_export_symbol(__qdf_nbuf_frag_info);
3583 #else
3584 void
3585 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3586 {
3587 	sg->sg_segs[0].vaddr = skb->data;
3588 	sg->sg_segs[0].len   = skb->len;
3589 	sg->nsegs            = 1;
3590 }
3591 qdf_export_symbol(__qdf_nbuf_frag_info);
3592 #endif
3593 #endif
3594 /**
3595  * __qdf_nbuf_get_frag_size() - get frag size
3596  * @nbuf: sk buffer
3597  * @cur_frag: current frag
3598  *
3599  * Return: frag size
3600  */
3601 uint32_t
3602 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3603 {
3604 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3605 	const skb_frag_t *frag = sh->frags + cur_frag;
3606 
3607 	return skb_frag_size(frag);
3608 }
3609 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3610 
3611 /**
3612  * __qdf_nbuf_frag_map() - dma map frag
3613  * @osdev: os device
3614  * @nbuf: sk buff
3615  * @offset: offset
3616  * @dir: direction
3617  * @cur_frag: current fragment
3618  *
3619  * Return: QDF status
3620  */
3621 #ifdef A_SIMOS_DEVHOST
3622 QDF_STATUS __qdf_nbuf_frag_map(
3623 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3624 	int offset, qdf_dma_dir_t dir, int cur_frag)
3625 {
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(nbuf) = paddr = (qdf_dma_addr_t)nbuf->data;
3629 	return QDF_STATUS_SUCCESS;
3630 }
3631 qdf_export_symbol(__qdf_nbuf_frag_map);
3632 #else
3633 QDF_STATUS __qdf_nbuf_frag_map(
3634 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3635 	int offset, qdf_dma_dir_t dir, int cur_frag)
3636 {
3637 	dma_addr_t paddr, frag_len;
3638 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3639 	const skb_frag_t *frag = sh->frags + cur_frag;
3640 
3641 	frag_len = skb_frag_size(frag);
3642 
3643 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3644 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3645 					__qdf_dma_dir_to_os(dir));
3646 	return dma_mapping_error(osdev->dev, paddr) ?
3647 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3648 }
3649 qdf_export_symbol(__qdf_nbuf_frag_map);
3650 #endif
3651 /**
3652  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3653  * @dmap: dma map
3654  * @cb: callback
3655  * @arg: argument
3656  *
3657  * Return: none
3658  */
3659 void
3660 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3661 {
3662 	return;
3663 }
3664 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3665 
3666 
3667 /**
3668  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3669  * @osdev: os device
3670  * @buf: sk buff
3671  * @dir: direction
3672  *
3673  * Return: none
3674  */
3675 #if defined(A_SIMOS_DEVHOST)
3676 static void __qdf_nbuf_sync_single_for_cpu(
3677 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3678 {
3679 	return;
3680 }
3681 #else
3682 static void __qdf_nbuf_sync_single_for_cpu(
3683 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3684 {
3685 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3686 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3687 		return;
3688 	}
3689 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3690 		skb_end_offset(buf) - skb_headroom(buf),
3691 		__qdf_dma_dir_to_os(dir));
3692 }
3693 #endif
3694 /**
3695  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3696  * @osdev: os device
3697  * @skb: sk buff
3698  * @dir: direction
3699  *
3700  * Return: none
3701  */
3702 void
3703 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3704 	struct sk_buff *skb, qdf_dma_dir_t dir)
3705 {
3706 	qdf_assert(
3707 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3708 
3709 	/*
3710 	 * Assume there's a single fragment.
3711 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3712 	 */
3713 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3714 }
3715 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3716 
3717 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3718 /**
3719  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3720  * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buffer to which the VHT info is written.
3722  * @rtap_len: Current length of radiotap buffer
3723  *
3724  * Return: Length of radiotap after VHT flags updated.
3725  */
3726 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3727 					struct mon_rx_status *rx_status,
3728 					int8_t *rtap_buf,
3729 					uint32_t rtap_len)
3730 {
3731 	uint16_t vht_flags = 0;
3732 
3733 	rtap_len = qdf_align(rtap_len, 2);
3734 
3735 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3736 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3737 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3738 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3739 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3740 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3741 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3742 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3743 	rtap_len += 2;
3744 
3745 	rtap_buf[rtap_len] |=
3746 		(rx_status->is_stbc ?
3747 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3748 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3749 		(rx_status->ldpc ?
3750 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3751 		(rx_status->beamformed ?
3752 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3753 	rtap_len += 1;
3754 	switch (rx_status->vht_flag_values2) {
3755 	case IEEE80211_RADIOTAP_VHT_BW_20:
3756 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3757 		break;
3758 	case IEEE80211_RADIOTAP_VHT_BW_40:
3759 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3760 		break;
3761 	case IEEE80211_RADIOTAP_VHT_BW_80:
3762 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3763 		break;
3764 	case IEEE80211_RADIOTAP_VHT_BW_160:
3765 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3766 		break;
3767 	}
3768 	rtap_len += 1;
3769 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3770 	rtap_len += 1;
3771 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3772 	rtap_len += 1;
3773 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3774 	rtap_len += 1;
3775 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3776 	rtap_len += 1;
3777 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3778 	rtap_len += 1;
3779 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3780 	rtap_len += 1;
3781 	put_unaligned_le16(rx_status->vht_flag_values6,
3782 			   &rtap_buf[rtap_len]);
3783 	rtap_len += 2;
3784 
3785 	return rtap_len;
3786 }
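/*
 * Byte-layout check for the VHT element written above (derived from the
 * code): known(2) + flags(1) + bandwidth(1) + mcs_nss(4) + coding(1) +
 * group_id(1) + partial_aid(2) = 12 bytes, matching the 12 in
 * RADIOTAP_VHT_FLAGS_LEN below; the extra '+ 1' covers the possible
 * 2-byte alignment of the start offset.
 */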
3787 
3788 /**
3789  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3790  * @rx_status: Pointer to rx_status.
3791  * @rtap_buf: buffer to which radiotap has to be updated
3792  * @rtap_len: radiotap length
3793  *
3794  * This API updates the high-efficiency (11ax) fields in the radiotap header.
3795  *
3796  * Return: updated length of the radiotap buffer.
3797  */
3798 static unsigned int
3799 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3800 				     int8_t *rtap_buf, uint32_t rtap_len)
3801 {
3802 	/*
3803 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3804 	 * Enable all "known" HE radiotap flags for now
3805 	 */
3806 	rtap_len = qdf_align(rtap_len, 2);
3807 
3808 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3809 	rtap_len += 2;
3810 
3811 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3812 	rtap_len += 2;
3813 
3814 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3815 	rtap_len += 2;
3816 
3817 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3818 	rtap_len += 2;
3819 
3820 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3821 	rtap_len += 2;
3822 
3823 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3824 	rtap_len += 2;
3825 	qdf_debug("he data %x %x %x %x %x %x",
3826 		  rx_status->he_data1,
3827 		  rx_status->he_data2, rx_status->he_data3,
3828 		  rx_status->he_data4, rx_status->he_data5,
3829 		  rx_status->he_data6);
3830 	return rtap_len;
3831 }
3832 
3833 
3834 /**
3835  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3836  * @rx_status: Pointer to rx_status.
3837  * @rtap_buf: buffer to which radiotap has to be updated
3838  * @rtap_len: radiotap length
3839  *
3840  * This API updates the HE-MU fields in the radiotap header.
3841  *
3842  * Return: updated length of the radiotap buffer.
3843  */
3844 static unsigned int
3845 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3846 				     int8_t *rtap_buf, uint32_t rtap_len)
3847 {
3848 	rtap_len = qdf_align(rtap_len, 2);
3849 
3850 	/*
3851 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3852 	 * Enable all "known" he-mu radiotap flags for now
3853 	 */
3854 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3855 	rtap_len += 2;
3856 
3857 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3858 	rtap_len += 2;
3859 
3860 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3861 	rtap_len += 1;
3862 
3863 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3864 	rtap_len += 1;
3865 
3866 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3867 	rtap_len += 1;
3868 
3869 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3870 	rtap_len += 1;
3871 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3872 		  rx_status->he_flags1,
3873 		  rx_status->he_flags2, rx_status->he_RU[0],
3874 		  rx_status->he_RU[1], rx_status->he_RU[2],
3875 		  rx_status->he_RU[3]);
3876 
3877 	return rtap_len;
3878 }
3879 
3880 /**
3881  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3882  * @rx_status: Pointer to rx_status.
3883  * @rtap_buf: buffer to which radiotap has to be updated
3884  * @rtap_len: radiotap length
3885  *
3886  * This API updates the HE-MU-OTHER fields in the radiotap header.
3887  *
3888  * Return: updated length of the radiotap buffer.
3889  */
3890 static unsigned int
3891 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3892 				     int8_t *rtap_buf, uint32_t rtap_len)
3893 {
3894 	rtap_len = qdf_align(rtap_len, 2);
3895 
3896 	/*
3897 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3898 	 * Enable all "known" he-mu-other radiotap flags for now
3899 	 */
3900 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3901 	rtap_len += 2;
3902 
3903 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3904 	rtap_len += 2;
3905 
3906 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3907 	rtap_len += 1;
3908 
3909 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3910 	rtap_len += 1;
3911 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3912 		  rx_status->he_per_user_1,
3913 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3914 		  rx_status->he_per_user_known);
3915 	return rtap_len;
3916 }
3917 
3918 
3919 /**
3920  * Length budget for the radiotap header: the combined length
3921  * (mandatory struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
3922  * must not exceed the available headroom_sz.
3923  * Increase these lengths when more radiotap elements are added.
3924  * The number after '+' is the maximum possible increase due to alignment.
3925  */
3926 
3927 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3928 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3929 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3930 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3931 #define RADIOTAP_FIXED_HEADER_LEN 17
3932 #define RADIOTAP_HT_FLAGS_LEN 3
3933 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3934 #define RADIOTAP_VENDOR_NS_LEN \
3935 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3936 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3937 				RADIOTAP_FIXED_HEADER_LEN + \
3938 				RADIOTAP_HT_FLAGS_LEN + \
3939 				RADIOTAP_VHT_FLAGS_LEN + \
3940 				RADIOTAP_AMPDU_STATUS_LEN + \
3941 				RADIOTAP_HE_FLAGS_LEN + \
3942 				RADIOTAP_HE_MU_FLAGS_LEN + \
3943 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
3944 				RADIOTAP_VENDOR_NS_LEN)
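/*
 * Worst-case arithmetic (illustrative, assuming the usual 8-byte
 * struct ieee80211_radiotap_header): 8 + 17 + 3 + 13 + 11 + 13 + 9 +
 * 19 = 93 bytes plus the vendor namespace element, so at least that
 * much headroom must be available for the radiotap header.
 */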
3945 
3946 #define IEEE80211_RADIOTAP_HE		23
3947 #define IEEE80211_RADIOTAP_HE_MU	24
3948 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3949 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
3950 
3951 /**
3952  * radiotap_num_to_freq() - Get frequency from chan number
3953  * @chan_num: Input channel number
3954  *
3955  * Return: Channel frequency in MHz
3956  */
3957 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3958 {
3959 	if (chan_num == CHANNEL_NUM_14)
3960 		return CHANNEL_FREQ_2484;
3961 	if (chan_num < CHANNEL_NUM_14)
3962 		return CHANNEL_FREQ_2407 +
3963 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3964 
3965 	if (chan_num < CHANNEL_NUM_27)
3966 		return CHANNEL_FREQ_2512 +
3967 			((chan_num - CHANNEL_NUM_15) *
3968 			 FREQ_MULTIPLIER_CONST_20MHZ);
3969 
3970 	if (chan_num > CHANNEL_NUM_182 &&
3971 			chan_num < CHANNEL_NUM_197)
3972 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3973 			CHANNEL_FREQ_4000);
3974 
3975 	return CHANNEL_FREQ_5000 +
3976 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3977 }
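/*
 * Worked examples (derived from the mapping above):
 * channel 1  -> 2407 + 1 * 5  = 2412 MHz
 * channel 14 -> 2484 MHz (special case)
 * channel 36 -> 5000 + 36 * 5 = 5180 MHz
 */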
3978 
3979 /**
3980  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
3981  * @rx_status: Pointer to rx_status.
3982  * @rtap_buf: Buf to which AMPDU info has to be updated.
3983  * @rtap_len: Current length of radiotap buffer
3984  *
3985  * Return: Length of radiotap after AMPDU flags updated.
3986  */
3987 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
3988 					struct mon_rx_status *rx_status,
3989 					uint8_t *rtap_buf,
3990 					uint32_t rtap_len)
3991 {
3992 	/*
3993 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
3994 	 * First 32 bits of AMPDU represents the reference number
3995 	 */
3996 
3997 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
3998 	uint16_t ampdu_flags = 0;
3999 	uint16_t ampdu_reserved_flags = 0;
4000 
4001 	rtap_len = qdf_align(rtap_len, 4);
4002 
4003 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4004 	rtap_len += 4;
4005 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4006 	rtap_len += 2;
4007 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4008 	rtap_len += 2;
4009 
4010 	return rtap_len;
4011 }
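/*
 * Size check (derived from the code): reference(4) + flags(2) +
 * reserved(2) = 8 bytes, matching the 8 in RADIOTAP_AMPDU_STATUS_LEN;
 * the '+ 3' accounts for the qdf_align(rtap_len, 4) above, which can
 * advance the offset by up to 3 bytes.
 */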
4012 
4013 /**
4014  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4015  * @rx_status: Pointer to rx_status.
4016  * @nbuf:      nbuf pointer to which radiotap has to be updated
4017  * @headroom_sz: Available headroom size.
4018  *
4019  * Return: length of the radiotap header written, or 0 on failure.
4020  */
4021 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4022 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4023 {
4024 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4025 	struct ieee80211_radiotap_header *rthdr =
4026 		(struct ieee80211_radiotap_header *)rtap_buf;
4027 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4028 	uint32_t rtap_len = rtap_hdr_len;
4029 	uint8_t length = rtap_len;
4030 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4031 
4032 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds */
4033 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4034 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4035 	rtap_len += 8;
4036 
4037 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4038 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4039 
4040 	if (rx_status->rs_fcs_err)
4041 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4042 
4043 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4044 	rtap_len += 1;
4045 
4046 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4047 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4048 	    !rx_status->he_flags) {
4049 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4050 		rtap_buf[rtap_len] = rx_status->rate;
4051 	} else
4052 		rtap_buf[rtap_len] = 0;
4053 	rtap_len += 1;
4054 
4055 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4056 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4057 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4058 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4059 	rtap_len += 2;
4060 	/* Channel flags. */
4061 	if (rx_status->chan_num > CHANNEL_NUM_35)
4062 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4063 	else
4064 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4065 	if (rx_status->cck_flag)
4066 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4067 	if (rx_status->ofdm_flag)
4068 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4069 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4070 	rtap_len += 2;
4071 
4072 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4073 	 *					(dBm)
4074 	 */
4075 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4076 	/*
4077 	 * rssi_comb is in dB relative to the noise floor; convert it to
4078 	 * dBm by adding the channel noise floor (typically -96 dBm).
4079 	 */
4080 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4081 	rtap_len += 1;
4082 
4083 	/* RX signal noise floor */
4084 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4085 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4086 	rtap_len += 1;
4087 
4088 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4089 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4090 	rtap_buf[rtap_len] = rx_status->nr_ant;
4091 	rtap_len += 1;
4092 
4093 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4094 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4095 		return 0;
4096 	}
4097 
4098 	if (rx_status->ht_flags) {
4099 		length = rtap_len;
4100 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4101 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4102 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4103 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4104 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4105 		rtap_len += 1;
4106 
4107 		if (rx_status->sgi)
4108 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4109 		if (rx_status->bw)
4110 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4111 		else
4112 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4113 		rtap_len += 1;
4114 
4115 		rtap_buf[rtap_len] = rx_status->mcs;
4116 		rtap_len += 1;
4117 
4118 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4119 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4120 			return 0;
4121 		}
4122 	}
4123 
4124 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4125 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4126 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4127 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4128 								rtap_buf,
4129 								rtap_len);
4130 	}
4131 
4132 	if (rx_status->vht_flags) {
4133 		length = rtap_len;
4134 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4135 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4136 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4137 								rtap_buf,
4138 								rtap_len);
4139 
4140 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4141 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4142 			return 0;
4143 		}
4144 	}
4145 
4146 	if (rx_status->he_flags) {
4147 		length = rtap_len;
4148 		/* IEEE80211_RADIOTAP_HE */
4149 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4150 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4151 								rtap_buf,
4152 								rtap_len);
4153 
4154 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4155 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4156 			return 0;
4157 		}
4158 	}
4159 
4160 	if (rx_status->he_mu_flags) {
4161 		length = rtap_len;
4162 		/* IEEE80211_RADIOTAP_HE-MU */
4163 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4164 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4165 								rtap_buf,
4166 								rtap_len);
4167 
4168 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4169 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4170 			return 0;
4171 		}
4172 	}
4173 
4174 	if (rx_status->he_mu_other_flags) {
4175 		length = rtap_len;
4176 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4177 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4178 		rtap_len =
4179 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4180 								rtap_buf,
4181 								rtap_len);
4182 
4183 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4184 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4185 			return 0;
4186 		}
4187 	}
4188 
4189 	rtap_len = qdf_align(rtap_len, 2);
4190 	/*
4191 	 * Radiotap Vendor Namespace
4192 	 */
4193 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4194 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4195 					(rtap_buf + rtap_len);
4196 	/*
4197 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4198 	 */
4199 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4200 	/*
4201 	 * Name space selector = 0
4202 	 * We only will have one namespace for now
4203 	 */
4204 	radiotap_vendor_ns_ath->hdr.selector = 0;
4205 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4206 					sizeof(*radiotap_vendor_ns_ath) -
4207 					sizeof(radiotap_vendor_ns_ath->hdr));
4208 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4209 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4210 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4211 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4212 				cpu_to_le32(rx_status->ppdu_timestamp);
4213 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4214 
4215 	rthdr->it_len = cpu_to_le16(rtap_len);
4216 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4217 
4218 	if (headroom_sz < rtap_len) {
4219 		qdf_err("ERROR: not enough space to update radiotap");
4220 		return 0;
4221 	}
4222 	qdf_nbuf_push_head(nbuf, rtap_len);
4223 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4224 	return rtap_len;
4225 }
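/*
 * Usage sketch (illustrative): a monitor-mode rx path fills in a
 * struct mon_rx_status from the hardware descriptor, then prepends the
 * radiotap header before handing the frame to the stack:
 *
 *	struct mon_rx_status rx_status = { 0 };
 *
 *	rx_status.chan_num = 36;
 *	rx_status.tsft = ppdu_tsf;              (hypothetical source)
 *	...
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf)))
 *		... not enough headroom; deliver without radiotap ...
 */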
4226 #else
4227 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4228 					struct mon_rx_status *rx_status,
4229 					int8_t *rtap_buf,
4230 					uint32_t rtap_len)
4231 {
4232 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4233 	return 0;
4234 }
4235 
4236 static unsigned int qdf_nbuf_update_radiotap_he_flags(
4237 	struct mon_rx_status *rx_status, int8_t *rtap_buf, uint32_t rtap_len)
4238 {
4239 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4240 	return 0;
4241 }
4242 
4243 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4244 					struct mon_rx_status *rx_status,
4245 					uint8_t *rtap_buf,
4246 					uint32_t rtap_len)
4247 {
4248 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4249 	return 0;
4250 }
4251 
4252 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4253 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4254 {
4255 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4256 	return 0;
4257 }
4258 #endif
4259 qdf_export_symbol(qdf_nbuf_update_radiotap);
4260 
4261 /**
4262  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4263  * @cb_func_ptr: function pointer to the nbuf free callback
4264  *
4265  * This function registers a callback function for nbuf free.
4266  *
4267  * Return: none
4268  */
4269 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4270 {
4271 	nbuf_free_cb = cb_func_ptr;
4272 }
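/*
 * Usage sketch (illustrative, assuming qdf_nbuf_free_t receives the
 * nbuf being freed):
 *
 *	static void my_nbuf_free_notify(qdf_nbuf_t nbuf)   (hypothetical)
 *	{
 *		... account for the freed buffer ...
 *	}
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_notify);
 */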
4273 
4274 /**
4275  * qdf_nbuf_classify_pkt() - classify packet and record the type in its cb
4276  * @skb: sk buff
4277  *
4278  * Return: none
4279  */
4280 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4281 {
4282 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4283 
4284 	/* check destination mac address is broadcast/multicast */
4285 	/* check whether the destination mac address is broadcast/multicast */
4286 	if (is_broadcast_ether_addr(eh->h_dest))
4287 		QDF_NBUF_CB_SET_BCAST(skb);
4288 	else if (is_multicast_ether_addr(eh->h_dest))
4289 
4290 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4291 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4292 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4293 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4294 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4295 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4296 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4297 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4298 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4299 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4300 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4301 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4302 }
4303 qdf_export_symbol(qdf_nbuf_classify_pkt);
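/*
 * Usage sketch (illustrative): a tx path can classify a frame and then
 * branch on the recorded type:
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		... e.g. send EAPOL frames at a reliable rate ...
 */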
4304 
4305 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4306 {
4307 	qdf_nbuf_users_set(&nbuf->users, 1);
4308 	nbuf->data = nbuf->head + NET_SKB_PAD;
4309 	skb_reset_tail_pointer(nbuf);
4310 }
4311 qdf_export_symbol(__qdf_nbuf_init);
4312 
4313 #ifdef WLAN_FEATURE_FASTPATH
4314 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4315 {
4316 	qdf_nbuf_users_set(&nbuf->users, 1);
4317 	nbuf->data = nbuf->head + NET_SKB_PAD;
4318 	skb_reset_tail_pointer(nbuf);
4319 }
4320 qdf_export_symbol(qdf_nbuf_init_fast);
4321 #endif /* WLAN_FEATURE_FASTPATH */
4322 
4323 
4324 #ifdef QDF_NBUF_GLOBAL_COUNT
4325 /**
4326  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4327  *
4328  * Return: void
4329  */
4330 void __qdf_nbuf_mod_init(void)
4331 {
4332 	qdf_atomic_init(&nbuf_count);
4333 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4334 }
4335 
4336 /**
4337  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4338  *
4339  * Return: void
4340  */
4341 void __qdf_nbuf_mod_exit(void)
4342 {
4343 }
4344 #endif
4345