/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf.c
 * QCA driver framework (QDF) network buffer management APIs
 */

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <qdf_atomic.h>
#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "qdf_flex_mem.h"
#include <qdf_mem.h>
#include <qdf_status.h>
#include <qdf_lock.h>
#include <qdf_trace.h>
#include <qdf_debugfs.h>
#include <net/ieee80211_radiotap.h>
#include <qdf_module.h>
#include <pld_common.h>
#include "qdf_str.h"

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)

#define qdf_nbuf_users_inc atomic_inc
#define qdf_nbuf_users_dec atomic_dec
#define qdf_nbuf_users_set atomic_set
#define qdf_nbuf_users_read atomic_read
#else
#define qdf_nbuf_users_inc refcount_inc
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */
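
/*
 * Note: kernels v4.13 and later converted skb reference counting from
 * atomic_t to refcount_t, so the shim above maps the qdf_nbuf_users_*
 * helpers onto whichever API the running kernel provides.
 */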

#define IEEE80211_RADIOTAP_VHT_BW_20	0
#define IEEE80211_RADIOTAP_VHT_BW_40	1
#define IEEE80211_RADIOTAP_VHT_BW_80	2
#define IEEE80211_RADIOTAP_VHT_BW_160	3

#define RADIOTAP_VHT_BW_20	0
#define RADIOTAP_VHT_BW_40	1
#define RADIOTAP_VHT_BW_80	4
#define RADIOTAP_VHT_BW_160	11

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040
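
/*
 * Illustrative sketch (not part of the driver): with the constants above,
 * the usual channel-to-frequency mapping for 2.4 GHz channels below
 * CHANNEL_NUM_14 is
 *
 *	freq = CHANNEL_FREQ_2407 + chan * FREQ_MULTIPLIER_CONST_5MHZ
 *
 * e.g. channel 6 -> 2407 + 6 * 5 = 2437 MHz, while channel 14 maps
 * directly to CHANNEL_FREQ_2484.
 */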

#ifdef CONFIG_MCL
#include <qdf_mc_timer.h>

struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME      "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

/**
 * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
 *
 * Each per-layer counter is cumulative; the values printed are differences
 * between adjacent layers, i.e. the packets currently held at each layer.
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
			uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		break;
	}
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_update);

/**
 * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both
 *	data and mgmt packets
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

/**
 * qdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state: layer at which the packet currently resides
 *
 * This function updates the packet state to the layer at which the packet
 * currently resides.
 *
 * Return: none
 */
void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt and Data packets are tracked; WMI messages
	 * such as scan commands are not tracked.
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}
	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
					current_state);
}
qdf_export_symbol(qdf_nbuf_set_state);
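
/*
 * Example (illustrative only): a TX-path layer tags the buffer as it hands
 * it down the stack, e.g.
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);
 *
 * so that qdf_nbuf_tx_desc_count_display() can report per-layer deltas.
 */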

#ifdef CONFIG_MCL
/**
 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler() - Replenish expire handler
 * @arg: opaque callback argument (unused)
 *
 * This function triggers when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

/**
 * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

/**
 * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}
#else

static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}
#endif

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT

/**
 * __qdf_nbuf_count_get() - get nbuf global count
 *
 * Return: nbuf global count
 */
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_get);

/**
 * __qdf_nbuf_count_inc() - increment nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	qdf_atomic_inc(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_inc);

/**
 * __qdf_nbuf_count_dec() - decrement nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_atomic_dec(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_dec);
#endif

#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below
	 * 0x50000040. Though we are trying to reserve low memory upfront
	 * to prevent this, we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				     size, func, line);
			return NULL;
		} else {
			/* Intentionally not freeing, so the same low-memory
			 * buffer will not get allocated again
			 */
			goto realloc;
		}
	}
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the
	 * data pointer.
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns CPU trying to assemble
		 * order-3 pages. __netdev_alloc_skb has a 4k-page fallback
		 * in case the high-order allocation fails, so we don't need
		 * to push that hard. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	} else {
		__qdf_nbuf_stop_replenish_timer();
	}

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the
	 * data pointer.
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#endif
qdf_export_symbol(__qdf_nbuf_alloc);
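
/*
 * Example (illustrative only; osdev comes from the caller's context):
 *
 *	skb = __qdf_nbuf_alloc(osdev, 2048, 0, 4, 0, __func__, __LINE__);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *
 * Callers normally reach this through the qdf_nbuf_alloc() wrapper, which
 * supplies the func/line arguments for allocation tracking.
 */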

/**
 * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
 * @skb: Pointer to network buffer
 *
 * Return: none
 */

#ifdef CONFIG_MCL
void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}
#else
void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	dev_kfree_skb_any(skb);
}
#endif

qdf_export_symbol(__qdf_nbuf_free);

#ifdef NBUF_MEMORY_DEBUG
enum qdf_nbuf_event_type {
	QDF_NBUF_ALLOC,
	QDF_NBUF_ALLOC_CLONE,
	QDF_NBUF_ALLOC_COPY,
	QDF_NBUF_ALLOC_FAILURE,
	QDF_NBUF_FREE,
	QDF_NBUF_MAP,
	QDF_NBUF_UNMAP,
};

struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
};

#define QDF_NBUF_HISTORY_SIZE 4096
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

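/*
 * qdf_nbuf_circular_index_next() advances a shared ring index: the atomic
 * increment lets concurrent writers claim distinct slots, and the periodic
 * subtraction rebases the counter so it cannot overflow. The rebase is
 * racy by design; at worst a slot is occasionally reused early, which is
 * acceptable for a best-effort debug history.
 */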
static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}

static void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	event->nbuf = nbuf;
	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
}
#endif /* NBUF_MEMORY_DEBUG */

#ifdef NBUF_MAP_UNMAP_DEBUG
struct qdf_nbuf_map_metadata {
	struct hlist_node node;
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
};

DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
			 sizeof(struct qdf_nbuf_map_metadata), 0);
#define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
static qdf_spinlock_t qdf_nbuf_map_lock;

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_flex_mem_init(&qdf_nbuf_map_pool);
	hash_init(qdf_nbuf_map_ht);
	qdf_spinlock_create(&qdf_nbuf_map_lock);
}

static void qdf_nbuf_map_leaks_print(void)
{
	struct qdf_nbuf_map_metadata *meta;
	int bucket;
	uint32_t count = 0;

	QDF_BUG(qdf_spin_is_locked(&qdf_nbuf_map_lock));

	qdf_nofl_alert("Nbuf map-no-unmap events detected!");
	qdf_nofl_alert("-----------------------------------------------------");

	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
		count++;
		qdf_nofl_alert("0x%zx @ %s:%u",
			       (uintptr_t)meta->nbuf, meta->func, meta->line);
	}

	QDF_DEBUG_PANIC("%u fatal nbuf map-no-unmap events detected!", count);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	if (!hash_empty(qdf_nbuf_map_ht))
		qdf_nbuf_map_leaks_print();
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
}

static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_nbuf_map_check_for_leaks();
	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
}

static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
{
	struct qdf_nbuf_map_metadata *meta;

	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
		if (meta->nbuf == nbuf)
			return meta;
	}

	return NULL;
}

static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	struct qdf_nbuf_map_metadata *meta;

	QDF_BUG(nbuf);
	if (!nbuf) {
		qdf_err("Cannot map null nbuf");
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	meta = qdf_nbuf_meta_get(nbuf);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
	if (meta)
		QDF_DEBUG_PANIC(
			"Double nbuf map detected @ %s:%u; last map from %s:%u",
			func, line, meta->func, meta->line);

	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
	if (!meta) {
		qdf_err("Failed to allocate nbuf map tracking metadata");
		return QDF_STATUS_E_NOMEM;
	}

	meta->nbuf = nbuf;
	qdf_str_lcopy(meta->func, func, QDF_MEM_FUNC_NAME_SIZE);
	meta->line = line;

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);

	return QDF_STATUS_SUCCESS;
}

static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	struct qdf_nbuf_map_metadata *meta;

	QDF_BUG(nbuf);
	if (!nbuf) {
		qdf_err("Cannot unmap null nbuf");
		return;
	}

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	meta = qdf_nbuf_meta_get(nbuf);

	if (!meta)
		QDF_DEBUG_PANIC(
		      "Double nbuf unmap or unmap without map detected @ %s:%u",
		      func, line);

	hash_del(&meta->node);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
}

QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
			      qdf_nbuf_t buf,
			      qdf_dma_dir_t dir,
			      const char *func,
			      uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_debug);
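
/*
 * Note (assumption about the wrapper layer): under NBUF_MAP_UNMAP_DEBUG,
 * callers are expected to reach these *_debug variants through the
 * qdf_nbuf_map()/qdf_nbuf_unmap() family of wrappers, which pass __func__
 * and __LINE__ so any leak report can name the mapping call site.
 */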

void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *func,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);

QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_single(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_single_debug);

void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_single_debug);

QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     int nbytes,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_debug);

void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 int nbytes,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);

QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
					    qdf_nbuf_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes,
					    const char *func,
					    uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);

void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
					qdf_nbuf_t buf,
					qdf_dma_dir_t dir,
					int nbytes,
					const char *func,
					uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);

static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, const char *func,
					     uint32_t line)
{
	struct qdf_nbuf_map_metadata *meta;

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	meta = qdf_nbuf_meta_get(nbuf);
	if (meta)
		QDF_DEBUG_PANIC(
			"Nbuf freed @ %s:%u while mapped from %s:%u",
			kbasename(func), line, meta->func, meta->line);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
}
#else
static inline void qdf_nbuf_map_tracking_init(void)
{
}

static inline void qdf_nbuf_map_tracking_deinit(void)
{
}

static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
						    const char *func,
						    uint32_t line)
{
}
#endif /* NBUF_MAP_UNMAP_DEBUG */

/**
 * __qdf_nbuf_map() - map a buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#ifdef QDF_OS_DEBUG
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	struct skb_shared_info *sh = skb_shinfo(skb);

	qdf_assert((dir == QDF_DMA_TO_DEVICE)
			|| (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's only a single fragment.
	 * To support multiple fragments, it would be necessary to change
	 * qdf_nbuf_t to be a separate object that stores meta-info
	 * (including the bus address for each fragment) and a pointer
	 * to the underlying sk_buff.
	 */
	qdf_assert(sh->nr_frags == 0);

	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);

#else
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);
#endif
/**
 * __qdf_nbuf_unmap() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: dma direction
 *
 * Return: none
 */
void
__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
			qdf_dma_dir_t dir)
{
	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_unmap);

/**
 * __qdf_nbuf_map_single() - map a single buffer to local bus address space
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#else
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
				skb_end_pointer(buf) - buf->data,
				__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr)
		? QDF_STATUS_E_FAILURE
		: QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#endif

/**
 * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
				qdf_dma_dir_t dir)
{
}
#else
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
					qdf_dma_dir_t dir)
{
	if (QDF_NBUF_CB_PADDR(buf))
		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
			skb_end_pointer(buf) - buf->data,
			__qdf_dma_dir_to_os(dir));
}
#endif
qdf_export_symbol(__qdf_nbuf_unmap_single);

/**
 * __qdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
{
	switch (cksum->l4_result) {
	case QDF_NBUF_RX_CKSUM_NONE:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = cksum->val;
		break;
	default:
		pr_err("Unknown checksum type\n");
		qdf_assert(0);
		return QDF_STATUS_E_NOSUPPORT;
	}
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_set_rx_cksum);

/**
 * __qdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
{
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		return QDF_NBUF_TX_CKSUM_NONE;
	case CHECKSUM_PARTIAL:
		return QDF_NBUF_TX_CKSUM_TCP_UDP;
	case CHECKSUM_COMPLETE:
		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
	default:
		return QDF_NBUF_TX_CKSUM_NONE;
	}
}
qdf_export_symbol(__qdf_nbuf_get_tx_cksum);

/**
 * __qdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
{
	return skb->priority;
}
qdf_export_symbol(__qdf_nbuf_get_tid);

/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to set
 *
 * Return: none
 */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
	skb->priority = tid;
}
qdf_export_symbol(__qdf_nbuf_set_tid);

/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
}
qdf_export_symbol(__qdf_nbuf_get_exemption_type);

/**
 * __qdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
{
	qdf_trace_update_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_trace_cb);

/**
 * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
 *              of a DHCP packet.
 * @data: Pointer to DHCP packet data buffer
 *
 * This func. returns the subtype of a DHCP packet.
 *
 * Return: subtype of the DHCP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
					QDF_DHCP_OPTION53_LENGTH)) {

		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
		case QDF_DHCP_DISCOVER:
			subtype = QDF_PROTO_DHCP_DISCOVER;
			break;
		case QDF_DHCP_REQUEST:
			subtype = QDF_PROTO_DHCP_REQUEST;
			break;
		case QDF_DHCP_OFFER:
			subtype = QDF_PROTO_DHCP_OFFER;
			break;
		case QDF_DHCP_ACK:
			subtype = QDF_PROTO_DHCP_ACK;
			break;
		case QDF_DHCP_NAK:
			subtype = QDF_PROTO_DHCP_NACK;
			break;
		case QDF_DHCP_RELEASE:
			subtype = QDF_PROTO_DHCP_RELEASE;
			break;
		case QDF_DHCP_INFORM:
			subtype = QDF_PROTO_DHCP_INFORM;
			break;
		case QDF_DHCP_DECLINE:
			subtype = QDF_PROTO_DHCP_DECLINE;
			break;
		default:
			break;
		}
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
 *            of an EAPOL packet.
 * @data: Pointer to EAPOL packet data buffer
 *
 * This func. returns the subtype of an EAPOL packet, classifying the
 * 4-way handshake message (M1-M4) from the Key Info field.
 *
 * Return: subtype of the EAPOL packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
{
	uint16_t eapol_key_info;
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	uint16_t mask;

	eapol_key_info = (uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_INFO_OFFSET));

	mask = eapol_key_info & EAPOL_MASK;
	switch (mask) {
	case EAPOL_M1_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M1;
		break;
	case EAPOL_M2_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M2;
		break;
	case EAPOL_M3_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M3;
		break;
	case EAPOL_M4_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M4;
		break;
	default:
		break;
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_arp_subtype() - get the subtype
 *            of an ARP packet.
 * @data: Pointer to ARP packet data buffer
 *
 * This func. returns the subtype of an ARP packet.
 *
 * Return: subtype of the ARP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_arp_subtype(uint8_t *data)
{
	uint16_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint16_t)(*(uint16_t *)
			(data + ARP_SUB_TYPE_OFFSET));

	switch (QDF_SWAP_U16(subtype)) {
	case ARP_REQUEST:
		proto_subtype = QDF_PROTO_ARP_REQ;
		break;
	case ARP_RESPONSE:
		proto_subtype = QDF_PROTO_ARP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
 *            of an IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. returns the subtype of an ICMP packet.
 *
 * Return: subtype of the ICMP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMP_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMP_REQUEST:
		proto_subtype = QDF_PROTO_ICMP_REQ;
		break;
	case ICMP_RESPONSE:
		proto_subtype = QDF_PROTO_ICMP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
 *            of an IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. returns the subtype of an ICMPV6 packet.
 *
 * Return: subtype of the ICMPV6 packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMPV6_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMPV6_REQUEST:
		proto_subtype = QDF_PROTO_ICMPV6_REQ;
		break;
	case ICMPV6_RESPONSE:
		proto_subtype = QDF_PROTO_ICMPV6_RES;
		break;
	case ICMPV6_RS:
		proto_subtype = QDF_PROTO_ICMPV6_RS;
		break;
	case ICMPV6_RA:
		proto_subtype = QDF_PROTO_ICMPV6_RA;
		break;
	case ICMPV6_NS:
		proto_subtype = QDF_PROTO_ICMPV6_NS;
		break;
	case ICMPV6_NA:
		proto_subtype = QDF_PROTO_ICMPV6_NA;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
 *            of an IPV4 packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. returns the proto type of an IPV4 packet.
 *
 * Return: proto type of IPV4 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
 *            of an IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. returns the proto type of an IPV6 packet.
 *
 * Return: proto type of IPV6 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
 * @data: Pointer to network data
 *
 * This api is for Tx packets.
 *
 * Return: true if packet is ipv4 packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets. Note that the UDP ports are read at a
 * constant offset, so a fixed-length IPv4 header (no IP options) is
 * assumed.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
					 sizeof(uint16_t)));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is EAPOL packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);

/**
 * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
 * @skb: Pointer to network buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is WAPI packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);

/**
 * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
 * @skb: Pointer to network buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is tdls packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is ARP packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);

/**
 * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is ARP request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: true if packet is ARP response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_arp_src_ip() - get arp src IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: ARP packet source IP value.
 */
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: ARP packet target IP value.
 */
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_get_dns_domain_name() - get dns domain name
 * @data: Pointer to network data buffer
 * @len: length to copy
 *
 * This api is for the dns domain name.
 *
 * Return: dns domain name.
 */
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
{
	uint8_t *domain_name;

	domain_name = (uint8_t *)
			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
	return domain_name;
}

/**
 * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
 * @data: Pointer to network data buffer
 *
 * This api is for dns query packets.
 *
 * Return: true if packet is dns query packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
{
	uint16_t op_code;
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
	/* A standard DNS query always happens on Dest Port 53. */
	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
 * @data: Pointer to network data buffer
 *
 * This api is for dns query responses.
 *
 * Return: true if packet is dns response packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
{
	uint16_t op_code;
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
	/* A standard DNS response always comes on Src Port 53. */
	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));

		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
 * @data: Pointer to network data buffer
 *
 * This api is for tcp syn packets.
 *
 * Return: true if packet is tcp syn packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
 * @data: Pointer to network data buffer
 *
 * This api is for tcp syn ack packets.
 *
 * Return: true if packet is tcp syn ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
 * @data: Pointer to network data buffer
 *
 * This api is for tcp ack packets.
 *
 * Return: true if packet is tcp ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
 * @data: Pointer to network data buffer
 *
 * This api is for tcp packets.
 *
 * Return: tcp source port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
{
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));

	return src_port;
}

/**
 * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
 * @data: Pointer to network data buffer
 *
 * This api is for tcp packets.
 *
 * Return: tcp destination port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
{
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));

	return tgt_port;
}

/**
 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
 * @data: Pointer to network data buffer
 *
 * This api is for icmpv4 request packets.
 *
 * Return: true if packet is icmpv4 request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
 * @data: Pointer to network data buffer
 *
 * This api is for icmpv4 response packets.
 *
 * Return: true if packet is icmpv4 response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: icmpv4 packet source IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packets.
 *
 * Return: icmpv4 packet target IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 packet or not.
 *
 * Return: TRUE if it is an IPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);

/**
 * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv6 packets.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
					sizeof(uint16_t));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. checks whether it is an IPV4 multicast packet or not.
 *
 * Return: TRUE if it is an IPV4 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
		uint32_t *dst_addr =
		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);

		/*
		 * Check the top nibble of the IPV4 destination address;
		 * a value of 0xE marks a multicast IP.
		 */
		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 multicast packet or not.
 *
 * Return: TRUE if it is an IPV6 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
		uint16_t *dst_addr;

		dst_addr = (uint16_t *)
			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);

		/*
		 * Check the first 16 bits of the IPV6 destination address;
		 * a value of 0xFF00 marks an IPV6 mcast packet.
		 */
		if (*dst_addr ==
		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. checks whether it is an ICMP packet or not.
 *
 * Return: TRUE if it is an ICMP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
		uint8_t pkt_type;

		pkt_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. checks whether it is an ICMPV6 packet or not.
 *
 * Return: TRUE if it is an ICMPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
		uint8_t pkt_type;

		pkt_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));

		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
			return true;
		else
			return false;
	} else
		return false;
}
1978 
1979 /**
1980  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1981  * @data: Pointer to IPV4 UDP packet data buffer
1982  *
1983  * This func. checks whether it is a IPV4 UDP packet or not.
1984  *
1985  * Return: TRUE if it is a IPV4 UDP packet
1986  *         FALSE if not
1987  */
1988 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1989 {
1990 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1991 		uint8_t pkt_type;
1992 
1993 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1994 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1995 
1996 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1997 			return true;
1998 		else
1999 			return false;
2000 	} else
2001 		return false;
2002 }
2003 
2004 /**
2005  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2006  * @data: Pointer to IPV4 TCP packet data buffer
2007  *
2008  * This func. checks whether it is an IPV4 TCP packet or not.
2009  *
2010  * Return: TRUE if it is an IPV4 TCP packet
2011  *         FALSE if not
2012  */
2013 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2014 {
2015 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2016 		uint8_t pkt_type;
2017 
2018 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2019 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2020 
2021 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2022 			return true;
2023 		else
2024 			return false;
2025 	} else
2026 		return false;
2027 }
2028 
2029 /**
2030  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2031  * @data: Pointer to IPV6 UDP packet data buffer
2032  *
2033  * This func. checks whether it is an IPV6 UDP packet or not.
2034  *
2035  * Return: TRUE if it is an IPV6 UDP packet
2036  *         FALSE if not
2037  */
2038 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2039 {
2040 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2041 		uint8_t pkt_type;
2042 
2043 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2044 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2045 
2046 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2047 			return true;
2048 		else
2049 			return false;
2050 	} else
2051 		return false;
2052 }
2053 
2054 /**
2055  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2056  * @data: Pointer to IPV6 TCP packet data buffer
2057  *
2058  * This func. checks whether it is an IPV6 TCP packet or not.
2059  *
2060  * Return: TRUE if it is an IPV6 TCP packet
2061  *         FALSE if not
2062  */
2063 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2064 {
2065 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2066 		uint8_t pkt_type;
2067 
2068 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2069 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2070 
2071 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2072 			return true;
2073 		else
2074 			return false;
2075 	} else
2076 		return false;
2077 }
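
/*
 * Editorial sketch (hypothetical helper, not part of the original
 * driver): the classification helpers above compose naturally for
 * per-protocol accounting; the function name and counter pointers are
 * invented for illustration.
 */
static inline void example_classify_l4(uint8_t *data, uint32_t *udp_cnt,
					uint32_t *tcp_cnt)
{
	/* each helper re-validates the L3 header, so the calls below are
	 * independent and safe on any ethernet frame */
	if (__qdf_nbuf_data_is_ipv4_udp_pkt(data) ||
	    __qdf_nbuf_data_is_ipv6_udp_pkt(data))
		(*udp_cnt)++;
	else if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data) ||
		 __qdf_nbuf_data_is_ipv6_tcp_pkt(data))
		(*tcp_cnt)++;
}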
2078 
2079 /**
2080  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2081  * @nbuf: sk buff
2082  *
2083  * Return: true if packet is broadcast
2084  *	   false otherwise
2085  */
2086 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2087 {
2088 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2089 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2090 }
2091 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2092 
2093 #ifdef NBUF_MEMORY_DEBUG
2094 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2095 
2096 /**
2097  * struct qdf_nbuf_track_t - Network buffer track structure
2098  *
2099  * @p_next: Pointer to next
2100  * @net_buf: Pointer to network buffer
2101  * @func_name: Function name
2102  * @line_num: Line number
2103  * @size: Size
2104  */
2105 struct qdf_nbuf_track_t {
2106 	struct qdf_nbuf_track_t *p_next;
2107 	qdf_nbuf_t net_buf;
2108 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2109 	uint32_t line_num;
2110 	size_t size;
2111 };
2112 
2113 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2114 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2115 
2116 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2117 static struct kmem_cache *nbuf_tracking_cache;
2118 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2119 static spinlock_t qdf_net_buf_track_free_list_lock;
2120 static uint32_t qdf_net_buf_track_free_list_count;
2121 static uint32_t qdf_net_buf_track_used_list_count;
2122 static uint32_t qdf_net_buf_track_max_used;
2123 static uint32_t qdf_net_buf_track_max_free;
2124 static uint32_t qdf_net_buf_track_max_allocated;
2125 
2126 /**
2127  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2128  *
2129  * tracks the max number of network buffers that the wlan driver was tracking
2130  * at any one time.
2131  *
2132  * Return: none
2133  */
2134 static inline void update_max_used(void)
2135 {
2136 	int sum;
2137 
2138 	if (qdf_net_buf_track_max_used <
2139 	    qdf_net_buf_track_used_list_count)
2140 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2141 	sum = qdf_net_buf_track_free_list_count +
2142 		qdf_net_buf_track_used_list_count;
2143 	if (qdf_net_buf_track_max_allocated < sum)
2144 		qdf_net_buf_track_max_allocated = sum;
2145 }
2146 
2147 /**
2148  * update_max_free() - update qdf_net_buf_track_max_free
2149  *
2150  * tracks the max number of tracking buffers kept in the freelist.
2151  *
2152  * Return: none
2153  */
2154 static inline void update_max_free(void)
2155 {
2156 	if (qdf_net_buf_track_max_free <
2157 	    qdf_net_buf_track_free_list_count)
2158 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2159 }
2160 
2161 /**
2162  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2163  *
2164  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2165  * This function also adds flexibility to adjust the allocation and freelist
2166  * schemes.
2167  *
2168  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2169  */
2170 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2171 {
2172 	int flags = GFP_KERNEL;
2173 	unsigned long irq_flag;
2174 	QDF_NBUF_TRACK *new_node = NULL;
2175 
2176 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2177 	qdf_net_buf_track_used_list_count++;
2178 	if (qdf_net_buf_track_free_list != NULL) {
2179 		new_node = qdf_net_buf_track_free_list;
2180 		qdf_net_buf_track_free_list =
2181 			qdf_net_buf_track_free_list->p_next;
2182 		qdf_net_buf_track_free_list_count--;
2183 	}
2184 	update_max_used();
2185 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2186 
2187 	if (new_node != NULL)
2188 		return new_node;
2189 
2190 	if (in_interrupt() || irqs_disabled() || in_atomic())
2191 		flags = GFP_ATOMIC;
2192 
2193 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2194 }
2195 
2196 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2197 #define FREEQ_POOLSIZE 2048
2198 
2199 /**
2200  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2201  * @node: nbuf tracking cookie to be freed
2202  *
2202  * Matches calls to qdf_nbuf_track_alloc.
2203  * Either frees the tracking cookie to kernel or an internal
2204  * freelist based on the size of the freelist.
2205  *
2206  * Return: none
2207  */
2208 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2209 {
2210 	unsigned long irq_flag;
2211 
2212 	if (!node)
2213 		return;
2214 
2215 	/* Try to shrink the freelist only when free_list_count exceeds
2216 	 * FREEQ_POOLSIZE and is bigger than twice the number of nbufs in
2217 	 * use. If the driver stalls in a consistently bursty fashion, this
2218 	 * keeps 3/4 of the allocations on the free list while still
2219 	 * allowing the system to recover memory as less frantic
2220 	 * traffic occurs.
2221 	 */
2222 
2223 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2224 
2225 	qdf_net_buf_track_used_list_count--;
2226 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2227 	   (qdf_net_buf_track_free_list_count >
2228 	    qdf_net_buf_track_used_list_count << 1)) {
2229 		kmem_cache_free(nbuf_tracking_cache, node);
2230 	} else {
2231 		node->p_next = qdf_net_buf_track_free_list;
2232 		qdf_net_buf_track_free_list = node;
2233 		qdf_net_buf_track_free_list_count++;
2234 	}
2235 	update_max_free();
2236 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2237 }
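
/*
 * Editorial note (worked example): with 1000 nbufs in use, tracking
 * cookies are returned to the kernel only while the freelist holds more
 * than max(FREEQ_POOLSIZE, 2 * 1000) = 2048 entries; below that
 * threshold they are recycled locally.
 */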
2238 
2239 /**
2240  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2241  *
2242  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2243  * the freelist first makes it performant for the first iperf udp burst
2244  * as well as steady state.
2245  *
2246  * Return: None
2247  */
2248 static void qdf_nbuf_track_prefill(void)
2249 {
2250 	int i;
2251 	QDF_NBUF_TRACK *node, *head;
2252 
2253 	/* prepopulate the freelist */
2254 	head = NULL;
2255 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2256 		node = qdf_nbuf_track_alloc();
2257 		if (node == NULL)
2258 			continue;
2259 		node->p_next = head;
2260 		head = node;
2261 	}
2262 	while (head) {
2263 		node = head->p_next;
2264 		qdf_nbuf_track_free(head);
2265 		head = node;
2266 	}
2267 
2268 	/* prefilled buffers should not count as used */
2269 	qdf_net_buf_track_max_used = 0;
2270 }
2271 
2272 /**
2273  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2274  *
2275  * This initializes the memory manager for the nbuf tracking cookies.  Because
2276  * these cookies are all the same size and only used in this feature, we can
2277  * use a kmem_cache to provide tracking as well as to speed up allocations.
2278  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2279  * features) a freelist is prepopulated here.
2280  *
2281  * Return: None
2282  */
2283 static void qdf_nbuf_track_memory_manager_create(void)
2284 {
2285 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2286 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2287 						sizeof(QDF_NBUF_TRACK),
2288 						0, 0, NULL);
2289 
2290 	qdf_nbuf_track_prefill();
2291 }
2292 
2293 /**
2294  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2295  *
2296  * Empty the freelist and print out usage statistics when it is no longer
2297  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2298  * any nbuf tracking cookies were leaked.
2299  *
2300  * Return: None
2301  */
2302 static void qdf_nbuf_track_memory_manager_destroy(void)
2303 {
2304 	QDF_NBUF_TRACK *node, *tmp;
2305 	unsigned long irq_flag;
2306 
2307 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2308 	node = qdf_net_buf_track_free_list;
2309 
2310 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2311 		qdf_print("%s: unexpectedly large max_used count %d",
2312 			  __func__, qdf_net_buf_track_max_used);
2313 
2314 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2315 		qdf_print("%s: %d unused trackers were allocated",
2316 			  __func__,
2317 			  qdf_net_buf_track_max_allocated -
2318 			  qdf_net_buf_track_max_used);
2319 
2320 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2321 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2322 		qdf_print("%s: check freelist shrinking functionality",
2323 			  __func__);
2324 
2325 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2326 		  "%s: %d residual freelist size",
2327 		  __func__, qdf_net_buf_track_free_list_count);
2328 
2329 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2330 		  "%s: %d max freelist size observed",
2331 		  __func__, qdf_net_buf_track_max_free);
2332 
2333 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2334 		  "%s: %d max buffers used observed",
2335 		  __func__, qdf_net_buf_track_max_used);
2336 
2337 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2338 		  "%s: %d max buffers allocated observed",
2339 		  __func__, qdf_net_buf_track_max_allocated);
2340 
2341 	while (node) {
2342 		tmp = node;
2343 		node = node->p_next;
2344 		kmem_cache_free(nbuf_tracking_cache, tmp);
2345 		qdf_net_buf_track_free_list_count--;
2346 	}
2347 
2348 	if (qdf_net_buf_track_free_list_count != 0)
2349 		qdf_info("%d unfreed tracking memory lost in freelist",
2350 			 qdf_net_buf_track_free_list_count);
2351 
2352 	if (qdf_net_buf_track_used_list_count != 0)
2353 		qdf_info("%d unfreed tracking memory still in use",
2354 			 qdf_net_buf_track_used_list_count);
2355 
2356 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2357 	kmem_cache_destroy(nbuf_tracking_cache);
2358 	qdf_net_buf_track_free_list = NULL;
2359 }
2360 
2361 /**
2362  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2363  *
2364  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2365  * in a hash table; when the driver is unloaded it reports leaked SKBs.
2366  * A WLAN driver module whose allocated SKB is freed by the network stack
2367  * is supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
2368  * not reported as a memory leak.
2369  *
2370  * Return: none
2371  */
2372 void qdf_net_buf_debug_init(void)
2373 {
2374 	uint32_t i;
2375 
2376 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2377 
2378 	qdf_nbuf_map_tracking_init();
2379 	qdf_nbuf_track_memory_manager_create();
2380 
2381 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2382 		gp_qdf_net_buf_track_tbl[i] = NULL;
2383 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2384 	}
2385 }
2386 qdf_export_symbol(qdf_net_buf_debug_init);
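
/*
 * Editorial sketch (hypothetical module glue, not part of this file):
 * the tracker is expected to bracket the driver's lifetime, so init runs
 * before the first tracked allocation and exit reports whatever was
 * never freed.
 */
static inline void example_nbuf_debug_lifecycle(void)
{
	qdf_net_buf_debug_init();	/* at driver load */
	/* ... driver runtime: every alloc/free is hashed per bucket ... */
	qdf_net_buf_debug_exit();	/* at unload: logs leaked SKBs */
}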
2387 
2388 /**
2389  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2390  *
2391  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2392  * As part of exiting the functionality, free the leaked memory and
2393  * cleanup the tracking buffers.
2394  *
2395  * Return: none
2396  */
2397 void qdf_net_buf_debug_exit(void)
2398 {
2399 	uint32_t i;
2400 	uint32_t count = 0;
2401 	unsigned long irq_flag;
2402 	QDF_NBUF_TRACK *p_node;
2403 	QDF_NBUF_TRACK *p_prev;
2404 
2405 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2406 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2407 		p_node = gp_qdf_net_buf_track_tbl[i];
2408 		while (p_node) {
2409 			p_prev = p_node;
2410 			p_node = p_node->p_next;
2411 			count++;
2412 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2413 				 p_prev->func_name, p_prev->line_num,
2414 				 p_prev->size, p_prev->net_buf);
2415 			qdf_nbuf_track_free(p_prev);
2416 		}
2417 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2418 	}
2419 
2420 	qdf_nbuf_track_memory_manager_destroy();
2421 	qdf_nbuf_map_tracking_deinit();
2422 
2423 #ifdef CONFIG_HALT_KMEMLEAK
2424 	if (count) {
2425 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2426 		QDF_BUG(0);
2427 	}
2428 #endif
2429 }
2430 qdf_export_symbol(qdf_net_buf_debug_exit);
2431 
2432 /**
2433  * qdf_net_buf_debug_hash() - hash network buffer pointer
2434  *
2435  * Return: hash value
2436  */
2437 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2438 {
2439 	uint32_t i;
2440 
2441 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2442 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2443 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2444 
2445 	return i;
2446 }
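
/*
 * Editorial note: the hash folds two right-shifted copies of the pointer
 * and masks the sum with (QDF_NET_BUF_TRACK_MAX_SIZE - 1), i.e. keeps
 * the low 10 bits for the 1024-entry table; mixing the >> 4 and >> 14
 * shifts spreads buffers that differ only in their slab offset across
 * different buckets.
 */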
2447 
2448 /**
2449  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2450  *
2451  * Return: If skb is found in hash table then return pointer to network buffer
2452  *	else return %NULL
2453  */
2454 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2455 {
2456 	uint32_t i;
2457 	QDF_NBUF_TRACK *p_node;
2458 
2459 	i = qdf_net_buf_debug_hash(net_buf);
2460 	p_node = gp_qdf_net_buf_track_tbl[i];
2461 
2462 	while (p_node) {
2463 		if (p_node->net_buf == net_buf)
2464 			return p_node;
2465 		p_node = p_node->p_next;
2466 	}
2467 
2468 	return NULL;
2469 }
2470 
2471 /**
2472  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2473  *
2474  * Return: none
2475  */
2476 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2477 				const char *func_name, uint32_t line_num)
2478 {
2479 	uint32_t i;
2480 	unsigned long irq_flag;
2481 	QDF_NBUF_TRACK *p_node;
2482 	QDF_NBUF_TRACK *new_node;
2483 
2484 	new_node = qdf_nbuf_track_alloc();
2485 
2486 	i = qdf_net_buf_debug_hash(net_buf);
2487 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2488 
2489 	p_node = qdf_net_buf_debug_look_up(net_buf);
2490 
2491 	if (p_node) {
2492 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2493 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2494 			  net_buf, func_name, line_num);
2495 		qdf_nbuf_track_free(new_node);
2496 	} else {
2497 		p_node = new_node;
2498 		if (p_node) {
2499 			p_node->net_buf = net_buf;
2500 			qdf_str_lcopy(p_node->func_name, func_name,
2501 				      QDF_MEM_FUNC_NAME_SIZE);
2502 			p_node->line_num = line_num;
2503 			p_node->size = size;
2504 			qdf_mem_skb_inc(size);
2505 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2506 			gp_qdf_net_buf_track_tbl[i] = p_node;
2507 		} else
2508 			qdf_print(
2509 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2510 				  func_name, line_num, size);
2511 	}
2512 
2513 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2514 }
2515 qdf_export_symbol(qdf_net_buf_debug_add_node);
2516 
2517 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2518 				   uint32_t line_num)
2519 {
2520 	uint32_t i;
2521 	unsigned long irq_flag;
2522 	QDF_NBUF_TRACK *p_node;
2523 
2524 	i = qdf_net_buf_debug_hash(net_buf);
2525 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2526 
2527 	p_node = qdf_net_buf_debug_look_up(net_buf);
2528 
2529 	if (p_node) {
2530 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2531 			      QDF_MEM_FUNC_NAME_SIZE);
2532 		p_node->line_num = line_num;
2533 	}
2534 
2535 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2536 }
2537 
2538 qdf_export_symbol(qdf_net_buf_debug_update_node);
2539 
2540 /**
2541  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2542  *
2543  * Return: none
2544  */
2545 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2546 {
2547 	uint32_t i;
2548 	QDF_NBUF_TRACK *p_head;
2549 	QDF_NBUF_TRACK *p_node = NULL;
2550 	unsigned long irq_flag;
2551 	QDF_NBUF_TRACK *p_prev;
2552 
2553 	i = qdf_net_buf_debug_hash(net_buf);
2554 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2555 
2556 	p_head = gp_qdf_net_buf_track_tbl[i];
2557 
2558 	/* Unallocated SKB */
2559 	if (!p_head)
2560 		goto done;
2561 
2562 	p_node = p_head;
2563 	/* Found at head of the table */
2564 	if (p_head->net_buf == net_buf) {
2565 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2566 		goto done;
2567 	}
2568 
2569 	/* Search in collision list */
2570 	while (p_node) {
2571 		p_prev = p_node;
2572 		p_node = p_node->p_next;
2573 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2574 			p_prev->p_next = p_node->p_next;
2575 			break;
2576 		}
2577 	}
2578 
2579 done:
2580 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2581 
2582 	if (p_node) {
2583 		qdf_mem_skb_dec(p_node->size);
2584 		qdf_nbuf_track_free(p_node);
2585 	} else {
2586 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2587 			  net_buf);
2588 		QDF_BUG(0);
2589 	}
2590 }
2591 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2592 
2593 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2594 				   const char *func_name, uint32_t line_num)
2595 {
2596 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2597 
2598 	while (ext_list) {
2599 		/*
2600 		 * Take care to add each fragment if this is a jumbo packet
2601 		 * chained using frag_list
2602 		 */
2603 		qdf_nbuf_t next;
2604 
2605 		next = qdf_nbuf_queue_next(ext_list);
2606 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2607 		ext_list = next;
2608 	}
2609 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2610 }
2611 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2612 
2613 /**
2614  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2615  * @net_buf: Network buf holding head segment (single)
2616  *
2617  * A WLAN driver module whose allocated SKB is freed by the network stack
2618  * is supposed to call this API before handing the SKB to the network
2619  * stack so that the SKB is not reported as a memory leak.
2620  *
2621  * Return: none
2622  */
2623 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2624 {
2625 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2626 
2627 	while (ext_list) {
2628 		/*
2629 		 * Take care to free each fragment if this is a jumbo packet
2630 		 * chained using frag_list
2631 		 */
2632 		qdf_nbuf_t next;
2633 
2634 		next = qdf_nbuf_queue_next(ext_list);
2635 
2636 		if (qdf_nbuf_get_users(ext_list) > 1) {
2637 			ext_list = next;
2638 			continue;
2639 		}
2640 
2641 		qdf_net_buf_debug_delete_node(ext_list);
2642 		ext_list = next;
2643 	}
2644 
2645 	if (qdf_nbuf_get_users(net_buf) > 1)
2646 		return;
2647 
2648 	qdf_net_buf_debug_delete_node(net_buf);
2649 }
2650 qdf_export_symbol(qdf_net_buf_debug_release_skb);
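
/*
 * Editorial sketch (hypothetical call site): untrack an SKB just before
 * ownership passes to the network stack, so the stack's eventual
 * kfree_skb() is not flagged as a driver leak.
 */
static inline void example_deliver_to_stack(qdf_nbuf_t nbuf)
{
	qdf_net_buf_debug_release_skb(nbuf);
	/* ... then hand nbuf to the stack, e.g. via netif_rx() ... */
}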
2651 
2652 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2653 				int reserve, int align, int prio,
2654 				const char *func, uint32_t line)
2655 {
2656 	qdf_nbuf_t nbuf;
2657 
2658 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2659 
2660 	/* Store SKB in internal QDF tracking table */
2661 	if (qdf_likely(nbuf)) {
2662 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2663 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2664 	} else {
2665 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2666 	}
2667 
2668 	return nbuf;
2669 }
2670 qdf_export_symbol(qdf_nbuf_alloc_debug);
2671 
2672 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2673 {
2674 	if (qdf_unlikely(!nbuf))
2675 		return;
2676 
2677 	if (qdf_nbuf_get_users(nbuf) > 1)
2678 		goto free_buf;
2679 
2680 	/* Remove SKB from internal QDF tracking table */
2681 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2682 	qdf_net_buf_debug_delete_node(nbuf);
2683 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2684 
2685 free_buf:
2686 	__qdf_nbuf_free(nbuf);
2687 }
2688 qdf_export_symbol(qdf_nbuf_free_debug);
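
/*
 * Editorial sketch (hypothetical helper, NBUF_MEMORY_DEBUG builds): the
 * debug alloc records the caller's function and line, which is exactly
 * what qdf_net_buf_debug_exit() prints for any SKB that leaks.
 */
static inline qdf_nbuf_t example_tracked_rx_alloc(qdf_device_t osdev)
{
	/* 2048-byte buffer, no headroom reserve, 4-byte alignment */
	return qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
				    __func__, __LINE__);
}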
2689 
2690 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2691 {
2692 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2693 
2694 	if (qdf_unlikely(!cloned_buf))
2695 		return NULL;
2696 
2697 	/* Store SKB in internal QDF tracking table */
2698 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2699 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2700 
2701 	return cloned_buf;
2702 }
2703 qdf_export_symbol(qdf_nbuf_clone_debug);
2704 
2705 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2706 {
2707 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2708 
2709 	if (qdf_unlikely(!copied_buf))
2710 		return NULL;
2711 
2712 	/* Store SKB in internal QDF tracking table */
2713 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2714 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2715 
2716 	return copied_buf;
2717 }
2718 qdf_export_symbol(qdf_nbuf_copy_debug);
2719 
2720 #endif /* NBUF_MEMORY_DEBUG */
2721 
2722 #if defined(FEATURE_TSO)
2723 
2724 /**
2725  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2726  *
2727  * @ethproto: ethernet type of the msdu
2728  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2729  * @l2_len: L2 length for the msdu
2730  * @eit_hdr: pointer to EIT header
2731  * @eit_hdr_len: EIT header length for the msdu
2732  * @eit_hdr_dma_map_addr: dma addr for EIT header
2733  * @tcphdr: pointer to tcp header
2734  * @ipv4_csum_en: ipv4 checksum enable
2735  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2736  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2737  * @ip_id: IP id
2738  * @tcp_seq_num: TCP sequence number
2739  *
2740  * This structure holds the TSO common info that is common
2741  * across all the TCP segments of the jumbo packet.
2742  */
2743 struct qdf_tso_cmn_seg_info_t {
2744 	uint16_t ethproto;
2745 	uint16_t ip_tcp_hdr_len;
2746 	uint16_t l2_len;
2747 	uint8_t *eit_hdr;
2748 	uint32_t eit_hdr_len;
2749 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2750 	struct tcphdr *tcphdr;
2751 	uint16_t ipv4_csum_en;
2752 	uint16_t tcp_ipv4_csum_en;
2753 	uint16_t tcp_ipv6_csum_en;
2754 	uint16_t ip_id;
2755 	uint32_t tcp_seq_num;
2756 };
2757 
2758 /**
2759  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2760  * information
2761  * @osdev: qdf device handle
2762  * @skb: skb buffer
2763  * @tso_info: parameters common to all segments
2764  *
2765  * Get the TSO information that is common across all the TCP
2766  * segments of the jumbo packet
2767  *
2768  * Return: 0 on success, 1 on failure
2769  */
2770 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2771 			struct sk_buff *skb,
2772 			struct qdf_tso_cmn_seg_info_t *tso_info)
2773 {
2774 	/* Get ethernet type and ethernet header length */
2775 	tso_info->ethproto = vlan_get_protocol(skb);
2776 
2777 	/* Determine whether this is an IPv4 or IPv6 packet */
2778 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2779 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2780 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2781 
2782 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2783 		tso_info->ipv4_csum_en = 1;
2784 		tso_info->tcp_ipv4_csum_en = 1;
2785 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2786 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2787 				ipv4_hdr->protocol);
2788 			return 1;
2789 		}
2790 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2791 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2792 		tso_info->tcp_ipv6_csum_en = 1;
2793 	} else {
2794 		qdf_err("TSO: ethertype 0x%x is not supported!",
2795 			tso_info->ethproto);
2796 		return 1;
2797 	}
2798 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2799 	tso_info->tcphdr = tcp_hdr(skb);
2800 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2801 	/* get pointer to the ethernet + IP + TCP header and their length */
2802 	tso_info->eit_hdr = skb->data;
2803 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2804 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2805 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2806 							tso_info->eit_hdr,
2807 							tso_info->eit_hdr_len,
2808 							DMA_TO_DEVICE);
2809 	if (unlikely(dma_mapping_error(osdev->dev,
2810 				       tso_info->eit_hdr_dma_map_addr))) {
2811 		qdf_err("DMA mapping error!");
2812 		qdf_assert(0);
2813 		return 1;
2814 	}
2815 
2816 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2817 		/* include IPv4 header length for IPV4 (total length) */
2818 		tso_info->ip_tcp_hdr_len =
2819 			tso_info->eit_hdr_len - tso_info->l2_len;
2820 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2821 		/* exclude IPv6 header length for IPv6 (payload length) */
2822 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2823 	}
2824 	/*
2825 	 * The length of the payload (application layer data) is added to
2826 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2827 	 * descriptor.
2828 	 */
2829 
2830 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2831 		tso_info->tcp_seq_num,
2832 		tso_info->eit_hdr_len,
2833 		tso_info->l2_len,
2834 		skb->len);
2835 	return 0;
2836 }
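
/*
 * Editorial note (worked example): for an untagged TCP/IPv4 frame with
 * no IP or TCP options, eit_hdr_len = 14 (ethernet) + 20 (IP) +
 * 20 (TCP) = 54 bytes, l2_len = 14, and ip_tcp_hdr_len = 54 - 14 = 40;
 * for IPv6 only tcp_hdrlen(), here 20, would be used.
 */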
2837 
2838 
2839 /**
2840  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2841  *
2842  * @curr_seg: Segment whose contents are initialized
2843  * @tso_cmn_info: parameters common to all segments
2844  *
2845  * Return: None
2846  */
2847 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2848 				struct qdf_tso_seg_elem_t *curr_seg,
2849 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2850 {
2851 	/* Initialize the flags to 0 */
2852 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2853 
2854 	/*
2855 	 * The following fields remain the same across all segments of
2856 	 * a jumbo packet
2857 	 */
2858 	curr_seg->seg.tso_flags.tso_enable = 1;
2859 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2860 		tso_cmn_info->ipv4_csum_en;
2861 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2862 		tso_cmn_info->tcp_ipv6_csum_en;
2863 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2864 		tso_cmn_info->tcp_ipv4_csum_en;
2865 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2866 
2867 	/* The following fields change for the segments */
2868 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2869 	tso_cmn_info->ip_id++;
2870 
2871 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2872 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2873 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2874 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2875 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2876 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2877 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2878 
2879 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2880 
2881 	/*
2882 	 * First fragment for each segment always contains the ethernet,
2883 	 * IP and TCP header
2884 	 */
2885 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2886 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2887 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2888 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2889 
2890 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2891 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2892 		   tso_cmn_info->eit_hdr_len,
2893 		   curr_seg->seg.tso_flags.tcp_seq_num,
2894 		   curr_seg->seg.total_len);
2895 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2896 }
2897 
2898 /**
2899  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2900  * into segments
2901  * @nbuf: network buffer to be segmented
2902  * @tso_info: This is the output. The information about the
2903  *           TSO segments will be populated within this.
2904  *
2905  * This function fragments a TCP jumbo packet into smaller
2906  * segments to be transmitted by the driver. It chains the TSO
2907  * segments created into a list.
2908  *
2909  * Return: number of TSO segments
2910  */
2911 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2912 		struct qdf_tso_info_t *tso_info)
2913 {
2914 	/* common across all segments */
2915 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2916 	/* segment specific */
2917 	void *tso_frag_vaddr;
2918 	qdf_dma_addr_t tso_frag_paddr = 0;
2919 	uint32_t num_seg = 0;
2920 	struct qdf_tso_seg_elem_t *curr_seg;
2921 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2922 	struct skb_frag_struct *frag = NULL;
2923 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2924 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2925 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2926 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2927 	int j = 0; /* skb fragment index */
2928 
2929 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2930 	total_num_seg = tso_info->tso_num_seg_list;
2931 	curr_seg = tso_info->tso_seg_list;
2932 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2933 
2934 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2935 						skb, &tso_cmn_info))) {
2936 		qdf_warn("TSO: error getting common segment info");
2937 		return 0;
2938 	}
2939 
2940 	/* length of the first chunk of data in the skb */
2941 	skb_frag_len = skb_headlen(skb);
2942 
2943 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2944 	/* update the remaining skb fragment length and TSO segment length */
2945 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2946 	skb_proc -= tso_cmn_info.eit_hdr_len;
2947 
2948 	/* get the address to the next tso fragment */
2949 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2950 	/* get the length of the next tso fragment */
2951 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2952 
2953 	if (tso_frag_len != 0) {
2954 		tso_frag_paddr = dma_map_single(osdev->dev,
2955 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2956 	}
2957 
2958 	if (unlikely(dma_mapping_error(osdev->dev,
2959 					tso_frag_paddr))) {
2960 		qdf_err("DMA mapping error!");
2961 		qdf_assert(0);
2962 		return 0;
2963 	}
2964 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2965 		__LINE__, skb_frag_len, tso_frag_len);
2966 	num_seg = tso_info->num_segs;
2967 	tso_info->num_segs = 0;
2968 	tso_info->is_tso = 1;
2969 
2970 	while (num_seg && curr_seg) {
2971 		int i = 1; /* tso fragment index */
2972 		uint8_t more_tso_frags = 1;
2973 
2974 		curr_seg->seg.num_frags = 0;
2975 		tso_info->num_segs++;
2976 		total_num_seg->num_seg.tso_cmn_num_seg++;
2977 
2978 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2979 						 &tso_cmn_info);
2980 
2981 		if (unlikely(skb_proc == 0))
2982 			return tso_info->num_segs;
2983 
2984 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2985 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2986 		/* frag len is added to ip_len in while loop below*/
2987 
2988 		curr_seg->seg.num_frags++;
2989 
2990 		while (more_tso_frags) {
2991 			if (tso_frag_len != 0) {
2992 				curr_seg->seg.tso_frags[i].vaddr =
2993 					tso_frag_vaddr;
2994 				curr_seg->seg.tso_frags[i].length =
2995 					tso_frag_len;
2996 				curr_seg->seg.total_len += tso_frag_len;
2997 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2998 				curr_seg->seg.num_frags++;
2999 				skb_proc = skb_proc - tso_frag_len;
3000 
3001 				/* increment the TCP sequence number */
3002 
3003 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3004 				curr_seg->seg.tso_frags[i].paddr =
3005 					tso_frag_paddr;
3006 			}
3007 
3008 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3009 					__func__, __LINE__,
3010 					i,
3011 					tso_frag_len,
3012 					curr_seg->seg.total_len,
3013 					curr_seg->seg.tso_frags[i].vaddr);
3014 
3015 			/* if there is no more data left in the skb */
3016 			if (!skb_proc)
3017 				return tso_info->num_segs;
3018 
3019 			/* get the next payload fragment information */
3020 			/* check if there are more fragments in this segment */
3021 			if (tso_frag_len < tso_seg_size) {
3022 				more_tso_frags = 1;
3023 				if (tso_frag_len != 0) {
3024 					tso_seg_size = tso_seg_size -
3025 						tso_frag_len;
3026 					i++;
3027 					if (curr_seg->seg.num_frags ==
3028 								FRAG_NUM_MAX) {
3029 						more_tso_frags = 0;
3030 						/*
3031 						 * reset i and the tso
3032 						 * payload size
3033 						 */
3034 						i = 1;
3035 						tso_seg_size =
3036 							skb_shinfo(skb)->
3037 								gso_size;
3038 					}
3039 				}
3040 			} else {
3041 				more_tso_frags = 0;
3042 				/* reset i and the tso payload size */
3043 				i = 1;
3044 				tso_seg_size = skb_shinfo(skb)->gso_size;
3045 			}
3046 
3047 			/* if the next fragment is contiguous */
3048 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3049 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3050 				skb_frag_len = skb_frag_len - tso_frag_len;
3051 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3052 
3053 			} else { /* the next fragment is not contiguous */
3054 				if (skb_shinfo(skb)->nr_frags == 0) {
3055 					qdf_info("TSO: nr_frags == 0!");
3056 					qdf_assert(0);
3057 					return 0;
3058 				}
3059 				if (j >= skb_shinfo(skb)->nr_frags) {
3060 					qdf_info("TSO: nr_frags %d j %d",
3061 						 skb_shinfo(skb)->nr_frags, j);
3062 					qdf_assert(0);
3063 					return 0;
3064 				}
3065 				frag = &skb_shinfo(skb)->frags[j];
3066 				skb_frag_len = skb_frag_size(frag);
3067 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3068 				tso_frag_vaddr = skb_frag_address_safe(frag);
3069 				j++;
3070 			}
3071 
3072 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3073 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3074 				tso_seg_size);
3075 
3076 			if (!(tso_frag_vaddr)) {
3077 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3078 						__func__);
3079 				return 0;
3080 			}
3081 
3082 			tso_frag_paddr =
3083 					 dma_map_single(osdev->dev,
3084 						 tso_frag_vaddr,
3085 						 tso_frag_len,
3086 						 DMA_TO_DEVICE);
3087 			if (unlikely(dma_mapping_error(osdev->dev,
3088 							tso_frag_paddr))) {
3089 				qdf_err("DMA mapping error!");
3090 				qdf_assert(0);
3091 				return 0;
3092 			}
3093 		}
3094 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3095 				curr_seg->seg.tso_flags.tcp_seq_num);
3096 		num_seg--;
3097 		/* if TCP FIN flag was set, set it in the last segment */
3098 		if (!num_seg)
3099 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3100 
3101 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3102 		curr_seg = curr_seg->next;
3103 	}
3104 	return tso_info->num_segs;
3105 }
3106 qdf_export_symbol(__qdf_nbuf_get_tso_info);
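
/*
 * Editorial note (worked example): a linear skb carrying 7200 TCP
 * payload bytes with gso_size = 1400 and a 54-byte EIT header yields
 * 6 segments; each segment's frag[0] reuses the shared EIT header
 * mapping, and tcp_seq_num advances by the bytes consumed (1400 per
 * full segment, 200 for the last).
 */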
3107 
3108 /**
3109  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3110  *
3111  * @osdev: qdf device handle
3112  * @tso_seg: TSO segment element to be unmapped
3113  * @is_last_seg: whether this is last tso seg or not
3114  *
3115  * Return: none
3116  */
3117 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3118 			  struct qdf_tso_seg_elem_t *tso_seg,
3119 			  bool is_last_seg)
3120 {
3121 	uint32_t num_frags = 0;
3122 
3123 	if (tso_seg->seg.num_frags > 0)
3124 		num_frags = tso_seg->seg.num_frags - 1;
3125 
3126 	/* Num of frags in a tso seg cannot be less than 2 */
3127 	if (num_frags < 1) {
3128 		/*
3129 		 * A tso seg may end up with only 1 frag when is_last_seg is
3130 		 * true and qdf_nbuf_get_tso_info() failed; in that case, do
3131 		 * the dma unmap for the 0th frag in this seg.
3132 		 */
3133 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3134 			goto last_seg_free_first_frag;
3135 
3136 		qdf_assert(0);
3137 		qdf_err("ERROR: num of frags in a tso segment is %d",
3138 			(num_frags + 1));
3139 		return;
3140 	}
3141 
3142 	while (num_frags) {
3143 		/* Do dma unmap of the tso seg except the 0th frag */
3144 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3145 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3146 				num_frags);
3147 			qdf_assert(0);
3148 			return;
3149 		}
3150 		dma_unmap_single(osdev->dev,
3151 				 tso_seg->seg.tso_frags[num_frags].paddr,
3152 				 tso_seg->seg.tso_frags[num_frags].length,
3153 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3154 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3155 		num_frags--;
3156 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3157 	}
3158 
3159 last_seg_free_first_frag:
3160 	if (is_last_seg) {
3161 		/* Do dma unmap for the tso seg 0th frag */
3162 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3163 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3164 			qdf_assert(0);
3165 			return;
3166 		}
3167 		dma_unmap_single(osdev->dev,
3168 				 tso_seg->seg.tso_frags[0].paddr,
3169 				 tso_seg->seg.tso_frags[0].length,
3170 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3171 		tso_seg->seg.tso_frags[0].paddr = 0;
3172 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3173 	}
3174 }
3175 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3176 
3177 /**
3178  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3179  * __qdf_nbuf_get_tso_num_seg() - count the number of TSO segments
3180  * needed for an skb
3181  * @skb: network buffer to be segmented
3182  *
3183  * This function computes how many TSO segments a TCP jumbo packet
3184  * will be fragmented into, mirroring the segmentation performed by
3185  * __qdf_nbuf_get_tso_info() without building the segment list.
3186  *
3187  * Return: number of TSO segments (0 on failure)
3190 #ifndef BUILD_X86
3191 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3192 {
3193 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3194 	uint32_t remainder, num_segs = 0;
3195 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3196 	uint8_t frags_per_tso = 0;
3197 	uint32_t skb_frag_len = 0;
3198 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3199 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3200 	struct skb_frag_struct *frag = NULL;
3201 	int j = 0;
3202 	uint32_t temp_num_seg = 0;
3203 
3204 	/* length of the first chunk of data in the skb minus eit header*/
3205 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3206 
3207 	/* Calculate num of segs for skb's first chunk of data*/
3208 	remainder = skb_frag_len % tso_seg_size;
3209 	num_segs = skb_frag_len / tso_seg_size;
3210 	/**
3211 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3212 	 * In that case, one more tso seg is required to accommodate
3213 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3214 	 * then remaining data will be accommodated while doing the calculation
3215 	 * for nr_frags data. Hence, frags_per_tso++.
3216 	 */
3217 	if (remainder) {
3218 		if (!skb_nr_frags)
3219 			num_segs++;
3220 		else
3221 			frags_per_tso++;
3222 	}
3223 
3224 	while (skb_nr_frags) {
3225 		if (j >= skb_shinfo(skb)->nr_frags) {
3226 			qdf_info("TSO: nr_frags %d j %d",
3227 				 skb_shinfo(skb)->nr_frags, j);
3228 			qdf_assert(0);
3229 			return 0;
3230 		}
3231 		/**
3232 		 * Calculate the number of tso seg for nr_frags data:
3233 		 * Get the length of each frag in skb_frag_len, add to
3234 		 * remainder. Get the number of segments by dividing it by
3235 		 * tso_seg_size and calculate the new remainder.
3236 		 * Decrement the nr_frags value and keep
3237 		 * looping all the skb_fragments.
3238 		 */
3239 		frag = &skb_shinfo(skb)->frags[j];
3240 		skb_frag_len = skb_frag_size(frag);
3241 		temp_num_seg = num_segs;
3242 		remainder += skb_frag_len;
3243 		num_segs += remainder / tso_seg_size;
3244 		remainder = remainder % tso_seg_size;
3245 		skb_nr_frags--;
3246 		if (remainder) {
3247 			if (num_segs > temp_num_seg)
3248 				frags_per_tso = 0;
3249 			/**
3250 			 * increment the tso per frags whenever remainder is
3251 			 * positive. If frags_per_tso reaches the (max-1),
3252 			 * [First frags always have EIT header, therefore max-1]
3253 			 * increment the num_segs as no more data can be
3254 			 * accommodated in the curr tso seg. Reset the remainder
3255 			 * and frags per tso and keep looping.
3256 			 */
3257 			frags_per_tso++;
3258 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3259 				num_segs++;
3260 				frags_per_tso = 0;
3261 				remainder = 0;
3262 			}
3263 			/**
3264 			 * If this is the last skb frag and still remainder is
3265 			 * non-zero(frags_per_tso is not reached to the max-1)
3266 			 * then increment the num_segs to take care of the
3267 			 * remaining length.
3268 			 */
3269 			if (!skb_nr_frags && remainder) {
3270 				num_segs++;
3271 				frags_per_tso = 0;
3272 			}
3273 		} else {
3274 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3275 			frags_per_tso = 0;
3276 		}
3277 		j++;
3278 	}
3279 
3280 	return num_segs;
3281 }
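
/*
 * Editorial note (worked example): skb_headlen() = 4054 with a 54-byte
 * EIT header leaves 4000 payload bytes; with tso_seg_size = 1400 that
 * is 4000 / 1400 = 2 full segments plus a 1200-byte remainder, and with
 * nr_frags == 0 the remainder costs one more segment, so num_segs = 3.
 */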
3282 #else
3283 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3284 {
3285 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3286 	struct skb_frag_struct *frag = NULL;
3287 
3288 	/*
3289 	 * Check if the head SKB or any of the frags are allocated in the
3290 	 * low-address region (below 0x50000040) that cannot be accessed
3291 	 * by the target
3291 	 */
3292 	if (virt_to_phys(skb->data) < 0x50000040) {
3293 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3294 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3295 				virt_to_phys(skb->data));
3296 		goto fail;
3297 
3298 	}
3299 
3300 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3301 		frag = &skb_shinfo(skb)->frags[i];
3302 
3303 		if (!frag)
3304 			goto fail;
3305 
3306 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3307 			goto fail;
3308 	}
3309 
3310 
3311 	gso_size = skb_shinfo(skb)->gso_size;
3312 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3313 			+ tcp_hdrlen(skb));
3314 	while (tmp_len) {
3315 		num_segs++;
3316 		if (tmp_len > gso_size)
3317 			tmp_len -= gso_size;
3318 		else
3319 			break;
3320 	}
3321 
3322 	return num_segs;
3323 
3324 	/*
3325 	 * Do not free this frame, just do socket level accounting
3326 	 * so that this is not reused.
3327 	 */
3328 fail:
3329 	if (skb->sk)
3330 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3331 
3332 	return 0;
3333 }
3334 #endif
3335 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3336 
3337 #endif /* FEATURE_TSO */
3338 
3339 /**
3340  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3341  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
3342  * @dmaaddr: DMA address
3343  * @lo: filled with the low 32 bits
3344  * @hi: filled with the high 32 bits
3345  *
3346  * Return: N/A
3346 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3347 			  uint32_t *lo, uint32_t *hi)
3348 {
3349 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3350 		*lo = lower_32_bits(dmaaddr);
3351 		*hi = upper_32_bits(dmaaddr);
3352 	} else {
3353 		*lo = dmaaddr;
3354 		*hi = 0;
3355 	}
3356 }
3357 
3358 qdf_export_symbol(__qdf_dmaaddr_to_32s);
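
/*
 * Editorial sketch (hypothetical descriptor layout): splitting a 64-bit
 * DMA address across a pair of 32-bit hardware descriptor words.
 */
static inline void example_fill_desc_addr(qdf_dma_addr_t paddr,
					  uint32_t *addr_lo,
					  uint32_t *addr_hi)
{
	/* e.g. 0x0000000412345678 -> lo = 0x12345678, hi = 0x00000004 */
	__qdf_dmaaddr_to_32s(paddr, addr_lo, addr_hi);
}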
3359 
3360 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3361 {
3362 	qdf_nbuf_users_inc(&skb->users);
3363 	return skb;
3364 }
3365 qdf_export_symbol(__qdf_nbuf_inc_users);
3366 
3367 int __qdf_nbuf_get_users(struct sk_buff *skb)
3368 {
3369 	return qdf_nbuf_users_read(&skb->users);
3370 }
3371 qdf_export_symbol(__qdf_nbuf_get_users);
3372 
3373 /**
3374  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3375  * @skb: sk_buff handle
3376  *
3377  * Return: none
3378  */
3379 
3380 void __qdf_nbuf_ref(struct sk_buff *skb)
3381 {
3382 	skb_get(skb);
3383 }
3384 qdf_export_symbol(__qdf_nbuf_ref);
3385 
3386 /**
3387  * __qdf_nbuf_shared() - Check whether the buffer is shared
3388  *  @skb: sk_buff buffer
3389  *
3390  *  Return: true if more than one person has a reference to this buffer.
3391  */
3392 int __qdf_nbuf_shared(struct sk_buff *skb)
3393 {
3394 	return skb_shared(skb);
3395 }
3396 qdf_export_symbol(__qdf_nbuf_shared);
3397 
3398 /**
3399  * __qdf_nbuf_dmamap_create() - create a DMA map.
3400  * @osdev: qdf device handle
3401  * @dmap: dma map handle
3402  *
3403  * This can later be used to map networking buffers. They:
3404  * - need space in adf_drv's software descriptor
3405  * - are typically created during adf_drv_create
3406  * - need to be created before any API (qdf_nbuf_map) that uses them
3407  *
3408  * Return: QDF STATUS
3409  */
3410 QDF_STATUS
3411 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3412 {
3413 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3414 	/*
3415 	 * The driver can tell us its SG capability; it must be handled
3416 	 * here, along with bounce buffers if they are present.
3417 	 */
3418 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3419 	if (!(*dmap))
3420 		error = QDF_STATUS_E_NOMEM;
3421 
3422 	return error;
3423 }
3424 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3425 /**
3426  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3427  * @osdev: qdf device handle
3428  * @dmap: dma map handle
3429  *
3430  * Return: none
3431  */
3432 void
3433 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3434 {
3435 	kfree(dmap);
3436 }
3437 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3438 
3439 /**
3440  * __qdf_nbuf_map_nbytes_single() - map nbytes
3441  * @osdev: os device
3442  * @buf: buffer
3443  * @dir: direction
3444  * @nbytes: number of bytes
3445  *
3446  * Return: QDF_STATUS
3447  */
3448 #ifdef A_SIMOS_DEVHOST
3449 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3450 		qdf_device_t osdev, struct sk_buff *buf,
3451 		 qdf_dma_dir_t dir, int nbytes)
3452 {
3453 	qdf_dma_addr_t paddr;
3454 
3455 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3456 	return QDF_STATUS_SUCCESS;
3457 }
3458 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3459 #else
3460 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3461 		qdf_device_t osdev, struct sk_buff *buf,
3462 		 qdf_dma_dir_t dir, int nbytes)
3463 {
3464 	qdf_dma_addr_t paddr;
3465 
3466 	/* assume that the OS only provides a single fragment */
3467 	QDF_NBUF_CB_PADDR(buf) = paddr =
3468 		dma_map_single(osdev->dev, buf->data,
3469 			nbytes, __qdf_dma_dir_to_os(dir));
3470 	return dma_mapping_error(osdev->dev, paddr) ?
3471 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3472 }
3473 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3474 #endif
3475 /**
3476  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3477  * @osdev: os device
3478  * @buf: buffer
3479  * @dir: direction
3480  * @nbytes: number of bytes
3481  *
3482  * Return: none
3483  */
3484 #if defined(A_SIMOS_DEVHOST)
3485 void
3486 __qdf_nbuf_unmap_nbytes_single(
3487 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3488 {
3489 }
3490 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3491 
3492 #else
3493 void
3494 __qdf_nbuf_unmap_nbytes_single(
3495 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3496 {
3497 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3498 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3499 		return;
3500 	}
3501 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3502 			nbytes, __qdf_dma_dir_to_os(dir));
3503 }
3504 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3505 #endif
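
/*
 * Editorial sketch (hypothetical helper): a map/unmap pairing for a
 * single-fragment transmit buffer, checking the QDF_STATUS result the
 * way callers of the APIs above are expected to.
 */
static inline QDF_STATUS example_map_for_tx(qdf_device_t osdev,
					    struct sk_buff *skb)
{
	QDF_STATUS status;

	status = __qdf_nbuf_map_nbytes_single(osdev, skb,
					      QDF_DMA_TO_DEVICE, skb->len);
	if (QDF_IS_STATUS_ERROR(status))
		return status;	/* dma_map_single() failed; drop or retry */

	/* ... program QDF_NBUF_CB_PADDR(skb) into hardware, then ... */
	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
				       skb->len);
	return QDF_STATUS_SUCCESS;
}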
3506 /**
3507  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3508  * @osdev: os device
3509  * @skb: skb handle
3510  * @dir: dma direction
3511  * @nbytes: number of bytes to be mapped
3512  *
3513  * Return: QDF_STATUS
3514  */
3515 #ifdef QDF_OS_DEBUG
3516 QDF_STATUS
3517 __qdf_nbuf_map_nbytes(
3518 	qdf_device_t osdev,
3519 	struct sk_buff *skb,
3520 	qdf_dma_dir_t dir,
3521 	int nbytes)
3522 {
3523 	struct skb_shared_info  *sh = skb_shinfo(skb);
3524 
3525 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3526 
3527 	/*
3528 	 * Assume there's only a single fragment.
3529 	 * To support multiple fragments, it would be necessary to change
3530 	 * adf_nbuf_t to be a separate object that stores meta-info
3531 	 * (including the bus address for each fragment) and a pointer
3532 	 * to the underlying sk_buff.
3533 	 */
3534 	qdf_assert(sh->nr_frags == 0);
3535 
3536 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3537 }
3538 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3539 #else
3540 QDF_STATUS
3541 __qdf_nbuf_map_nbytes(
3542 	qdf_device_t osdev,
3543 	struct sk_buff *skb,
3544 	qdf_dma_dir_t dir,
3545 	int nbytes)
3546 {
3547 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3548 }
3549 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3550 #endif
3551 /**
3552  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3553  * @osdev: OS device
3554  * @skb: skb handle
3555  * @dir: direction
3556  * @nbytes: number of bytes
3557  *
3558  * Return: none
3559  */
3560 void
3561 __qdf_nbuf_unmap_nbytes(
3562 	qdf_device_t osdev,
3563 	struct sk_buff *skb,
3564 	qdf_dma_dir_t dir,
3565 	int nbytes)
3566 {
3567 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3568 
3569 	/*
3570 	 * Assume there's a single fragment.
3571 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3572 	 */
3573 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3574 }
3575 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3576 
3577 /**
3578  * __qdf_nbuf_dma_map_info() - return the dma map info
3579  * @bmap: dma map
3580  * @sg: dma map info
3581  *
3582  * Return: none
3583  */
3584 void
3585 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3586 {
3587 	qdf_assert(bmap->mapped);
3588 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3589 
3590 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3591 			sizeof(struct __qdf_segment));
3592 	sg->nsegs = bmap->nsegs;
3593 }
3594 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3595 /**
3596  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3597  *			specified by the index
3598  * @skb: sk buff
3599  * @sg: scatter/gather list of all the frags
3600  *
3601  * Return: none
3602  */
3603 #if defined(__QDF_SUPPORT_FRAG_MEM)
3604 void
3605 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3606 {
3607 	struct skb_shared_info *sh = skb_shinfo(skb);
3608 	int i;
3609 
3610 	qdf_assert(skb != NULL);
3611 	sg->sg_segs[0].vaddr = skb->data;
3612 	sg->sg_segs[0].len   = skb->len;
3613 	sg->nsegs            = 1;
3614 
3615 	for (i = 1; i <= sh->nr_frags; i++) {
3616 		skb_frag_t    *f        = &sh->frags[i - 1];
3617 
3618 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3619 			f->page_offset);
3620 		sg->sg_segs[i].len      = f->size;
3621 
3622 		qdf_assert(i < QDF_MAX_SGLIST);
3623 	}
3624 	/* head data plus one entry per page fragment */
3625 	sg->nsegs += sh->nr_frags;
3626 }
3624 qdf_export_symbol(__qdf_nbuf_frag_info);
3625 #else
3626 #ifdef QDF_OS_DEBUG
3627 void
3628 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3629 {
3630 
3631 	struct skb_shared_info  *sh = skb_shinfo(skb);
3632 
3633 	qdf_assert(skb != NULL);
3634 	sg->sg_segs[0].vaddr = skb->data;
3635 	sg->sg_segs[0].len   = skb->len;
3636 	sg->nsegs            = 1;
3637 
3638 	qdf_assert(sh->nr_frags == 0);
3639 }
3640 qdf_export_symbol(__qdf_nbuf_frag_info);
3641 #else
3642 void
3643 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3644 {
3645 	sg->sg_segs[0].vaddr = skb->data;
3646 	sg->sg_segs[0].len   = skb->len;
3647 	sg->nsegs            = 1;
3648 }
3649 qdf_export_symbol(__qdf_nbuf_frag_info);
3650 #endif
3651 #endif
3652 /**
3653  * __qdf_nbuf_get_frag_size() - get frag size
3654  * @nbuf: sk buffer
3655  * @cur_frag: current frag
3656  *
3657  * Return: frag size
3658  */
3659 uint32_t
3660 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3661 {
3662 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3663 	const skb_frag_t *frag = sh->frags + cur_frag;
3664 
3665 	return skb_frag_size(frag);
3666 }
3667 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3668 
3669 /**
3670  * __qdf_nbuf_frag_map() - dma map frag
3671  * @osdev: os device
3672  * @nbuf: sk buff
3673  * @offset: offset
3674  * @dir: direction
3675  * @cur_frag: current fragment
3676  *
3677  * Return: QDF status
3678  */
3679 #ifdef A_SIMOS_DEVHOST
3680 QDF_STATUS __qdf_nbuf_frag_map(
3681 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3682 	int offset, qdf_dma_dir_t dir, int cur_frag)
3683 {
3684 	int32_t paddr, frag_len;
3685 
3686 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3687 	return QDF_STATUS_SUCCESS;
3688 }
3689 qdf_export_symbol(__qdf_nbuf_frag_map);
3690 #else
3691 QDF_STATUS __qdf_nbuf_frag_map(
3692 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3693 	int offset, qdf_dma_dir_t dir, int cur_frag)
3694 {
3695 	dma_addr_t paddr, frag_len;
3696 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3697 	const skb_frag_t *frag = sh->frags + cur_frag;
3698 
3699 	frag_len = skb_frag_size(frag);
3700 
3701 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3702 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3703 					__qdf_dma_dir_to_os(dir));
3704 	return dma_mapping_error(osdev->dev, paddr) ?
3705 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3706 }
3707 qdf_export_symbol(__qdf_nbuf_frag_map);
3708 #endif
3709 /**
3710  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3711  * @dmap: dma map
3712  * @cb: callback
3713  * @arg: argument
3714  *
3715  * Return: none
3716  */
3717 void
3718 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3719 {
3720 	return;
3721 }
3722 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3723 
3724 
3725 /**
3726  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3727  * @osdev: os device
3728  * @buf: sk buff
3729  * @dir: direction
3730  *
3731  * Return: none
3732  */
3733 #if defined(A_SIMOS_DEVHOST)
3734 static void __qdf_nbuf_sync_single_for_cpu(
3735 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3736 {
3737 	return;
3738 }
3739 #else
3740 static void __qdf_nbuf_sync_single_for_cpu(
3741 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3742 {
3743 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3744 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3745 		return;
3746 	}
3747 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3748 		skb_end_offset(buf) - skb_headroom(buf),
3749 		__qdf_dma_dir_to_os(dir));
3750 }
3751 #endif
3752 /**
3753  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3754  * @osdev: os device
3755  * @skb: sk buff
3756  * @dir: direction
3757  *
3758  * Return: none
3759  */
3760 void
3761 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3762 	struct sk_buff *skb, qdf_dma_dir_t dir)
3763 {
3764 	qdf_assert(
3765 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3766 
3767 	/*
3768 	 * Assume there's a single fragment.
3769 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3770 	 */
3771 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3772 }
3773 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3774 
3775 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3776 /**
3777  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3778  * @rx_status: Pointer to rx_status.
3779  * @rtap_buf: buffer to which the VHT info is to be written
3780  * @rtap_len: Current length of radiotap buffer
3781  *
3782  * Return: Length of radiotap after VHT flags updated.
3783  */
3784 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3785 					struct mon_rx_status *rx_status,
3786 					int8_t *rtap_buf,
3787 					uint32_t rtap_len)
3788 {
3789 	uint16_t vht_flags = 0;
3790 
3791 	rtap_len = qdf_align(rtap_len, 2);
3792 
3793 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3794 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3795 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3796 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3797 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3798 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3799 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3800 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3801 	rtap_len += 2;
3802 
3803 	rtap_buf[rtap_len] |=
3804 		(rx_status->is_stbc ?
3805 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3806 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3807 		(rx_status->ldpc ?
3808 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3809 		(rx_status->beamformed ?
3810 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3811 	rtap_len += 1;
3812 	switch (rx_status->vht_flag_values2) {
3813 	case IEEE80211_RADIOTAP_VHT_BW_20:
3814 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3815 		break;
3816 	case IEEE80211_RADIOTAP_VHT_BW_40:
3817 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3818 		break;
3819 	case IEEE80211_RADIOTAP_VHT_BW_80:
3820 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3821 		break;
3822 	case IEEE80211_RADIOTAP_VHT_BW_160:
3823 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3824 		break;
3825 	}
3826 	rtap_len += 1;
3827 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3828 	rtap_len += 1;
3829 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3830 	rtap_len += 1;
3831 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3832 	rtap_len += 1;
3833 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3834 	rtap_len += 1;
3835 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3836 	rtap_len += 1;
3837 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3838 	rtap_len += 1;
3839 	put_unaligned_le16(rx_status->vht_flag_values6,
3840 			   &rtap_buf[rtap_len]);
3841 	rtap_len += 2;
3842 
3843 	return rtap_len;
3844 }
3845 
3846 /**
3847  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3848  * @rx_status: Pointer to rx_status.
3849  * @rtap_buf: buffer to which radiotap has to be updated
3850  * @rtap_len: radiotap length
3851  *
3852  * This API updates the high-efficiency (11ax) fields in the radiotap header.
3853  *
3854  * Return: Length of radiotap after HE flags updated.
3855  */
3856 static unsigned int
3857 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3858 				     int8_t *rtap_buf, uint32_t rtap_len)
3859 {
3860 	/*
3861 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3862 	 * Enable all "known" HE radiotap flags for now
3863 	 */
3864 	rtap_len = qdf_align(rtap_len, 2);
3865 
3866 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3867 	rtap_len += 2;
3868 
3869 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3870 	rtap_len += 2;
3871 
3872 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3873 	rtap_len += 2;
3874 
3875 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3876 	rtap_len += 2;
3877 
3878 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3879 	rtap_len += 2;
3880 
3881 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3882 	rtap_len += 2;
3883 	qdf_debug("he data %x %x %x %x %x %x",
3884 		  rx_status->he_data1,
3885 		  rx_status->he_data2, rx_status->he_data3,
3886 		  rx_status->he_data4, rx_status->he_data5,
3887 		  rx_status->he_data6);
3888 	return rtap_len;
3889 }
3890 
3891 
3892 /**
3893  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3894  * @rx_status: Pointer to rx_status.
3895  * @rtap_buf: buffer to which radiotap has to be updated
3896  * @rtap_len: radiotap length
3897  *
3898  * This API updates the HE-MU fields in the radiotap header.
3899  *
3900  * Return: Length of radiotap after HE-MU flags updated.
3901  */
3902 static unsigned int
3903 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3904 				     int8_t *rtap_buf, uint32_t rtap_len)
3905 {
3906 	rtap_len = qdf_align(rtap_len, 2);
3907 
3908 	/*
3909 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3910 	 * Enable all "known" he-mu radiotap flags for now
3911 	 */
3912 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3913 	rtap_len += 2;
3914 
3915 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3916 	rtap_len += 2;
3917 
3918 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3919 	rtap_len += 1;
3920 
3921 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3922 	rtap_len += 1;
3923 
3924 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3925 	rtap_len += 1;
3926 
3927 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3928 	rtap_len += 1;
3929 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3930 		  rx_status->he_flags1,
3931 		  rx_status->he_flags2, rx_status->he_RU[0],
3932 		  rx_status->he_RU[1], rx_status->he_RU[2],
3933 		  rx_status->he_RU[3]);
3934 
3935 	return rtap_len;
3936 }
3937 
3938 /**
3939  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3940  * @rx_status: Pointer to rx_status.
3941  * @rtap_buf: buffer to which radiotap has to be updated
3942  * @rtap_len: radiotap length
3943  *
3944  * This API updates the HE-MU-other fields in the radiotap header.
3945  *
3946  * Return: Length of radiotap after HE-MU-other flags updated.
3947  */
3948 static unsigned int
3949 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3950 				     int8_t *rtap_buf, uint32_t rtap_len)
3951 {
3952 	rtap_len = qdf_align(rtap_len, 2);
3953 
3954 	/*
3955 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3956 	 * Enable all "known" he-mu-other radiotap flags for now
3957 	 */
3958 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3959 	rtap_len += 2;
3960 
3961 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3962 	rtap_len += 2;
3963 
3964 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3965 	rtap_len += 1;
3966 
3967 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3968 	rtap_len += 1;
3969 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3970 		  rx_status->he_per_user_1,
3971 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3972 		  rx_status->he_per_user_known);
3973 	return rtap_len;
3974 }
3975 
3976 
3977 /**
3978  * Combined radiotap header length: the mandatory
3979  * struct ieee80211_radiotap_header plus all optional fields
3980  * (together, RADIOTAP_HEADER_LEN) cannot exceed the available headroom_sz.
3981  * Increase it when more radiotap elements are added.
3982  * The number after '+' indicates the maximum possible increase due to alignment.
3983  */
3984 
3985 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3986 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3987 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3988 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3989 #define RADIOTAP_FIXED_HEADER_LEN 17
3990 #define RADIOTAP_HT_FLAGS_LEN 3
3991 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3992 #define RADIOTAP_VENDOR_NS_LEN \
3993 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3994 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3995 				RADIOTAP_FIXED_HEADER_LEN + \
3996 				RADIOTAP_HT_FLAGS_LEN + \
3997 				RADIOTAP_VHT_FLAGS_LEN + \
3998 				RADIOTAP_AMPDU_STATUS_LEN + \
3999 				RADIOTAP_HE_FLAGS_LEN + \
4000 				RADIOTAP_HE_MU_FLAGS_LEN + \
4001 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4002 				RADIOTAP_VENDOR_NS_LEN)
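
/*
 * Sketch of a compile-time guard (an assumption, not in the original):
 * callers must reserve at least RADIOTAP_HEADER_LEN bytes of headroom,
 * so a check against a hypothetical per-driver headroom budget, e.g.
 * MON_RX_HEADROOM, could catch regressions when new elements are added:
 *
 *	BUILD_BUG_ON(RADIOTAP_HEADER_LEN > MON_RX_HEADROOM);
 *
 * (BUILD_BUG_ON must live in function scope, e.g. in an init routine.)
 */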
4003 
4004 #define IEEE80211_RADIOTAP_HE 23
4005 #define IEEE80211_RADIOTAP_HE_MU	24
4006 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4007 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4008 
4009 /**
4010  * radiotap_num_to_freq() - Get frequency from channel number
4011  * @chan_num: Input channel number
4012  *
4013  * Return: Channel frequency in MHz
4014  */
4015 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
4016 {
4017 	if (chan_num == CHANNEL_NUM_14)
4018 		return CHANNEL_FREQ_2484;
4019 	if (chan_num < CHANNEL_NUM_14)
4020 		return CHANNEL_FREQ_2407 +
4021 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4022 
4023 	if (chan_num < CHANNEL_NUM_27)
4024 		return CHANNEL_FREQ_2512 +
4025 			((chan_num - CHANNEL_NUM_15) *
4026 			 FREQ_MULTIPLIER_CONST_20MHZ);
4027 
4028 	if (chan_num > CHANNEL_NUM_182 &&
4029 			chan_num < CHANNEL_NUM_197)
4030 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4031 			CHANNEL_FREQ_4000);
4032 
4033 	return CHANNEL_FREQ_5000 +
4034 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4035 }
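
/*
 * Worked examples for the mapping above (informational only):
 *	chan   6 -> 2407 +   6 * 5        = 2437 MHz (2.4 GHz band)
 *	chan  16 -> 2512 + (16 - 15) * 20 = 2532 MHz (legacy numbering)
 *	chan  36 -> 5000 +  36 * 5        = 5180 MHz (5 GHz band)
 *	chan 185 -> 4000 + 185 * 5        = 4925 MHz (4.9 GHz band)
 */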
4036 
4037 /**
4038  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4039  * @rx_status: Pointer to rx_status.
4040  * @rtap_buf: Buf to which AMPDU info has to be updated.
4041  * @rtap_len: Current length of radiotap buffer
4042  *
4043  * Return: Length of radiotap after AMPDU flags updated.
4044  */
4045 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4046 					struct mon_rx_status *rx_status,
4047 					uint8_t *rtap_buf,
4048 					uint32_t rtap_len)
4049 {
4050 	/*
4051 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4052 	 * First 32 bits of AMPDU represents the reference number
4053 	 */
4054 
4055 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4056 	uint16_t ampdu_flags = 0;
4057 	uint16_t ampdu_reserved_flags = 0;
4058 
4059 	rtap_len = qdf_align(rtap_len, 4);
4060 
4061 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4062 	rtap_len += 4;
4063 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4064 	rtap_len += 2;
4065 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4066 	rtap_len += 2;
4067 
4068 	return rtap_len;
4069 }
4070 
4071 /**
4072  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4073  * @rx_status: Pointer to rx_status.
4074  * @nbuf:      nbuf pointer to which radiotap has to be updated
4075  * @headroom_sz: Available headroom size.
4076  *
4077  * Return: length of rtap_len updated.
4078  */
4079 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4080 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4081 {
4082 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4083 	struct ieee80211_radiotap_header *rthdr =
4084 		(struct ieee80211_radiotap_header *)rtap_buf;
4085 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4086 	uint32_t rtap_len = rtap_hdr_len;
4087 	uint8_t length = rtap_len;
4088 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4089 
4090 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4091 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4092 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4093 	rtap_len += 8;
4094 
4095 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4096 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4097 
4098 	if (rx_status->rs_fcs_err)
4099 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4100 
4101 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4102 	rtap_len += 1;
4103 
4104 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4105 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4106 	    !rx_status->he_flags) {
4107 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4108 		rtap_buf[rtap_len] = rx_status->rate;
4109 	} else
4110 		rtap_buf[rtap_len] = 0;
4111 	rtap_len += 1;
4112 
4113 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4114 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4115 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4116 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4117 	rtap_len += 2;
4118 	/* Channel flags. */
4119 	if (rx_status->chan_num > CHANNEL_NUM_35)
4120 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4121 	else
4122 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4123 	if (rx_status->cck_flag)
4124 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4125 	if (rx_status->ofdm_flag)
4126 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4127 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4128 	rtap_len += 2;
4129 
4130 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4131 	 *					(dBm)
4132 	 */
4133 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4134 	/*
4135 	 * rssi_comb is in dB relative to the noise floor; add the
4136 	 * channel noise floor (nominally -96 dBm) to convert it to dBm.
4137 	 */
4138 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4139 	rtap_len += 1;
4140 
4141 	/* RX signal noise floor */
4142 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4143 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4144 	rtap_len += 1;
4145 
4146 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4147 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4148 	rtap_buf[rtap_len] = rx_status->nr_ant;
4149 	rtap_len += 1;
4150 
4151 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4152 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4153 		return 0;
4154 	}
4155 
4156 	if (rx_status->ht_flags) {
4157 		length = rtap_len;
4158 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4159 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4160 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4161 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4162 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4163 		rtap_len += 1;
4164 
4165 		if (rx_status->sgi)
4166 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4167 		if (rx_status->bw)
4168 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4169 		else
4170 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4171 		rtap_len += 1;
4172 
4173 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4174 		rtap_len += 1;
4175 
4176 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4177 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4178 			return 0;
4179 		}
4180 	}
4181 
4182 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4183 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4184 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4185 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4186 								rtap_buf,
4187 								rtap_len);
4188 	}
4189 
4190 	if (rx_status->vht_flags) {
4191 		length = rtap_len;
4192 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4193 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4194 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4195 								rtap_buf,
4196 								rtap_len);
4197 
4198 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4199 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4200 			return 0;
4201 		}
4202 	}
4203 
4204 	if (rx_status->he_flags) {
4205 		length = rtap_len;
4206 		/* IEEE80211_RADIOTAP_HE */
4207 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4208 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4209 								rtap_buf,
4210 								rtap_len);
4211 
4212 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4213 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4214 			return 0;
4215 		}
4216 	}
4217 
4218 	if (rx_status->he_mu_flags) {
4219 		length = rtap_len;
4220 		/* IEEE80211_RADIOTAP_HE-MU */
4221 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4222 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4223 								rtap_buf,
4224 								rtap_len);
4225 
4226 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4227 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4228 			return 0;
4229 		}
4230 	}
4231 
4232 	if (rx_status->he_mu_other_flags) {
4233 		length = rtap_len;
4234 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4235 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4236 		rtap_len =
4237 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4238 								rtap_buf,
4239 								rtap_len);
4240 
4241 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4242 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4243 			return 0;
4244 		}
4245 	}
4246 
4247 	rtap_len = qdf_align(rtap_len, 2);
4248 	/*
4249 	 * Radiotap Vendor Namespace
4250 	 */
4251 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4252 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4253 					(rtap_buf + rtap_len);
4254 	/*
4255 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4256 	 */
4257 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4258 	/*
4259 	 * Name space selector = 0
4260 	 * We only will have one namespace for now
4261 	 */
4262 	radiotap_vendor_ns_ath->hdr.selector = 0;
4263 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4264 					sizeof(*radiotap_vendor_ns_ath) -
4265 					sizeof(radiotap_vendor_ns_ath->hdr));
4266 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4267 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4268 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4269 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4270 				cpu_to_le32(rx_status->ppdu_timestamp);
4271 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4272 
4273 	rthdr->it_len = cpu_to_le16(rtap_len);
4274 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4275 
4276 	if (headroom_sz < rtap_len) {
4277 		qdf_err("ERROR: not enough space to update radiotap");
4278 		return 0;
4279 	}
4280 	qdf_nbuf_push_head(nbuf, rtap_len);
4281 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4282 	return rtap_len;
4283 }
4284 #else
4285 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4286 					struct mon_rx_status *rx_status,
4287 					int8_t *rtap_buf,
4288 					uint32_t rtap_len)
4289 {
4290 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4291 	return 0;
4292 }
4293 
4294 static unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4295 				      int8_t *rtap_buf, uint32_t rtap_len)
4296 {
4297 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4298 	return 0;
4299 }
4300 
4301 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4302 					struct mon_rx_status *rx_status,
4303 					uint8_t *rtap_buf,
4304 					uint32_t rtap_len)
4305 {
4306 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4307 	return 0;
4308 }
4309 
4310 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4311 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4312 {
4313 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4314 	return 0;
4315 }
4316 #endif
4317 qdf_export_symbol(qdf_nbuf_update_radiotap);
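
/*
 * Usage sketch (illustrative only): a monitor-mode RX path fills a
 * struct mon_rx_status from the PPDU status TLVs (assumed done by the
 * caller) and then prepends the radiotap header into the headroom.
 */
static inline bool example_prepend_radiotap(struct mon_rx_status *rx_status,
					    qdf_nbuf_t nbuf)
{
	/* qdf_nbuf_update_radiotap() returns 0 on failure, e.g. when the
	 * available headroom is smaller than the built radiotap header.
	 */
	return qdf_nbuf_update_radiotap(rx_status, nbuf,
					qdf_nbuf_headroom(nbuf)) != 0;
}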
4318 
4319 /**
4320  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4321  * @cb_func_ptr: function pointer to the nbuf free callback
4322  *
4323  * This function registers a callback function for nbuf free.
4324  *
4325  * Return: none
4326  */
4327 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4328 {
4329 	nbuf_free_cb = cb_func_ptr;
4330 }
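
/*
 * Usage sketch (illustrative only): register a driver-level free hook.
 * example_nbuf_free_notify is a hypothetical callback, not an existing
 * driver symbol.
 */
static void example_nbuf_free_notify(qdf_nbuf_t nbuf)
{
	/* e.g. update per-driver accounting before the skb is released */
}

static inline void example_register_free_cb(void)
{
	__qdf_nbuf_reg_free_cb(example_nbuf_free_notify);
}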
4331 
4332 /**
4333  * qdf_nbuf_classify_pkt() - classify packet
4334  * @skb: sk buff
4335  *
4336  * Return: none
4337  */
4338 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4339 {
4340 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4341 
4342 	/* check whether the destination MAC address is broadcast/multicast */
4343 	if (is_broadcast_ether_addr(eh->h_dest))
4344 		QDF_NBUF_CB_SET_BCAST(skb);
4345 	else if (is_multicast_ether_addr(eh->h_dest))
4346 		QDF_NBUF_CB_SET_MCAST(skb);
4347 
4348 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4349 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4350 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4351 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4352 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4353 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4354 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4355 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4356 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4357 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4358 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4359 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4360 }
4361 qdf_export_symbol(qdf_nbuf_classify_pkt);
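
/*
 * Usage sketch (illustrative only): classify a TX skb early in the
 * transmit path so later stages can branch on the cached packet type.
 */
static inline bool example_is_eapol(struct sk_buff *skb)
{
	qdf_nbuf_classify_pkt(skb);

	return QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
}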
4362 
4363 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4364 {
4365 	qdf_nbuf_users_set(&nbuf->users, 1);
4366 	nbuf->data = nbuf->head + NET_SKB_PAD;
4367 	skb_reset_tail_pointer(nbuf);
4368 }
4369 qdf_export_symbol(__qdf_nbuf_init);
4370 
4371 #ifdef WLAN_FEATURE_FASTPATH
4372 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4373 {
4374 	qdf_nbuf_users_set(&nbuf->users, 1);
4375 	nbuf->data = nbuf->head + NET_SKB_PAD;
4376 	skb_reset_tail_pointer(nbuf);
4377 }
4378 qdf_export_symbol(qdf_nbuf_init_fast);
4379 #endif /* WLAN_FEATURE_FASTPATH */
4380 
4381 
4382 #ifdef QDF_NBUF_GLOBAL_COUNT
4383 /**
4384  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4385  *
4386  * Return: void
4387  */
4388 void __qdf_nbuf_mod_init(void)
4389 {
4390 	qdf_atomic_init(&nbuf_count);
4391 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4392 }
4393 
4394 /**
4395  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4396  *
4397  * Return: void
4398  */
4399 void __qdf_nbuf_mod_exit(void)
4400 {
4401 }
4402 #endif
4403