1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework (QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_types.h>
32 #include <qdf_nbuf.h>
33 #include "qdf_flex_mem.h"
34 #include <qdf_mem.h>
35 #include <qdf_status.h>
36 #include <qdf_lock.h>
37 #include <qdf_trace.h>
38 #include <qdf_debugfs.h>
39 #include <net/ieee80211_radiotap.h>
40 #include <qdf_module.h>
42 #include <pld_common.h>
44 #include "qdf_str.h"
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
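
/*
 * Illustrative sketch (not part of the driver): the wrappers above let
 * nbuf code manage skb reference counts the same way whether the kernel
 * backs sk_buff.users with atomic_t (< 4.13) or refcount_t (>= 4.13).
 * A hypothetical caller is identical on either kernel:
 *
 *	qdf_nbuf_users_set(&skb->users, 1);
 *	qdf_nbuf_users_inc(&skb->users);
 *	if (qdf_nbuf_users_read(&skb->users) == 2)
 *		...a second holder still references the skb...
 */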
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* channel number to freq conversion */
78 #define CHANNEL_NUM_14 14
79 #define CHANNEL_NUM_15 15
80 #define CHANNEL_NUM_27 27
81 #define CHANNEL_NUM_35 35
82 #define CHANNEL_NUM_182 182
83 #define CHANNEL_NUM_197 197
84 #define CHANNEL_FREQ_2484 2484
85 #define CHANNEL_FREQ_2407 2407
86 #define CHANNEL_FREQ_2512 2512
87 #define CHANNEL_FREQ_5000 5000
88 #define CHANNEL_FREQ_4000 4000
89 #define FREQ_MULTIPLIER_CONST_5MHZ 5
90 #define FREQ_MULTIPLIER_CONST_20MHZ 20
91 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
92 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
93 #define RADIOTAP_CCK_CHANNEL 0x0020
94 #define RADIOTAP_OFDM_CHANNEL 0x0040
95 
96 #ifdef CONFIG_MCL
97 #include <qdf_mc_timer.h>
98 
99 struct qdf_track_timer {
100 	qdf_mc_timer_t track_timer;
101 	qdf_atomic_t alloc_fail_cnt;
102 };
103 
104 static struct qdf_track_timer alloc_track_timer;
105 
106 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
107 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
108 #endif
109 
110 /* Packet Counter */
111 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
112 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
113 #ifdef QDF_NBUF_GLOBAL_COUNT
114 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
115 static qdf_atomic_t nbuf_count;
116 #endif
117 
118 /**
119  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
120  *
121  * Return: none
122  */
123 void qdf_nbuf_tx_desc_count_display(void)
124 {
125 	qdf_debug("Current Snapshot of the Driver:");
126 	qdf_debug("Data Packets:");
127 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
129 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
138 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
146 	qdf_debug("Mgmt Packets:");
147 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
161 }
162 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
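
/*
 * Reading the snapshot above: each counter only ever increments, so the
 * number of packets currently held at a layer is printed as a
 * difference. Worked example (illustrative numbers): if HDD has seen
 * 100 data packets, TXRX 90, TXRX_ENQUEUE 90 and TXRX_DEQUEUE 85, the
 * "HDD" column prints 100 - (90 + 90 - 85) = 5 packets still owned by
 * HDD, and "TXRX_Q" prints 90 - 85 = 5 packets waiting on the TXRX
 * queue.
 */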
163 
164 /**
165  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
166  * @packet_type: packet type, either mgmt or data
167  * @current_state: layer at which the packet is currently present
168  *
169  * Return: none
170  */
171 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
172 			uint8_t current_state)
173 {
174 	switch (packet_type) {
175 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
176 		nbuf_tx_mgmt[current_state]++;
177 		break;
178 	case QDF_NBUF_TX_PKT_DATA_TRACK:
179 		nbuf_tx_data[current_state]++;
180 		break;
181 	default:
182 		break;
183 	}
184 }
185 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
186 
187 /**
188  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
189  *
190  * Return: none
191  */
192 void qdf_nbuf_tx_desc_count_clear(void)
193 {
194 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
195 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
196 }
197 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
198 
199 /**
200  * qdf_nbuf_set_state() - Updates the packet state
201  * @nbuf: network buffer
202  * @current_state: layer at which the packet currently is
203  *
204  * This function updates the packet state to the layer at which the packet
205  * currently is
206  *
207  * Return: none
208  */
209 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
210 {
211 	/*
212 	 * Only mgmt and data packets are tracked; WMI messages
213 	 * such as scan commands are not tracked.
214 	 */
215 	uint8_t packet_type;
216 
217 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
218 
219 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
220 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
221 		return;
222 	}
223 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
224 	qdf_nbuf_tx_desc_count_update(packet_type,
225 					current_state);
226 }
227 qdf_export_symbol(qdf_nbuf_set_state);
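
/*
 * Illustrative call site (hypothetical; real callers sit in the tx
 * path): a layer marks a tracked data frame as it hands it downward,
 * which bumps the matching per-layer counter above.
 *
 *	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
 *	...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTT);
 */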
228 
229 #ifdef CONFIG_MCL
230 /**
231  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
232  *
233  * This function starts the alloc fail replenish timer.
234  *
235  * Return: void
236  */
237 static void __qdf_nbuf_start_replenish_timer(void)
238 {
239 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
240 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
241 	    QDF_TIMER_STATE_RUNNING)
242 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
243 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
244 }
245 
246 /**
247  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
248  *
249  * This function stops the alloc fail replenish timer.
250  *
251  * Return: void
252  */
253 static void __qdf_nbuf_stop_replenish_timer(void)
254 {
255 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
256 		return;
257 
258 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
259 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
260 	    QDF_TIMER_STATE_RUNNING)
261 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
262 }
263 
264 /**
265  * qdf_replenish_expire_handler() - Replenish expire handler
266  * @arg: opaque timer context (unused)
267  * This function triggers when the alloc fail replenish timer expires.
268  *
269  * Return: void
270  */
271 static void qdf_replenish_expire_handler(void *arg)
272 {
273 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
274 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
275 		qdf_print("ERROR: NBUF allocation timer expired, fail count %d",
276 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
277 
278 		/* Error handling here */
279 	}
280 }
281 
282 /**
283  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
284  *
285  * This function initializes the nbuf alloc fail replenish timer.
286  *
287  * Return: void
288  */
289 void __qdf_nbuf_init_replenish_timer(void)
290 {
291 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
292 			  qdf_replenish_expire_handler, NULL);
293 }
294 
295 /**
296  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
297  *
298  * This function deinitializes the nbuf alloc fail replenish timer.
299  *
300  * Return: void
301  */
302 void __qdf_nbuf_deinit_replenish_timer(void)
303 {
304 	__qdf_nbuf_stop_replenish_timer();
305 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
306 }
307 #else
308 
309 static inline void __qdf_nbuf_start_replenish_timer(void) {}
310 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
311 #endif
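
/*
 * Sketch of the replenish-timer flow implemented above: the allocator
 * below starts the timer when both __netdev_alloc_skb() and the pld
 * pre-allocated pool fail, and stops it once a pre-allocated buffer is
 * handed out again. If more than QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD
 * (50) failures accumulate before the QDF_NBUF_ALLOC_EXPIRE_TIMER_MS
 * (5000 ms) timer fires, qdf_replenish_expire_handler() logs the
 * failure count.
 */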
312 
313 /* globals do not need to be initialized to NULL/0 */
314 qdf_nbuf_trace_update_t qdf_trace_update_cb;
315 qdf_nbuf_free_t nbuf_free_cb;
316 
317 #ifdef QDF_NBUF_GLOBAL_COUNT
318 
319 /**
320  * __qdf_nbuf_count_get() - get nbuf global count
321  *
322  * Return: nbuf global count
323  */
324 int __qdf_nbuf_count_get(void)
325 {
326 	return qdf_atomic_read(&nbuf_count);
327 }
328 qdf_export_symbol(__qdf_nbuf_count_get);
329 
330 /**
331  * __qdf_nbuf_count_inc() - increment nbuf global count
332  *
333  * @nbuf: sk buff
334  *
335  * Return: void
336  */
337 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
338 {
339 	qdf_atomic_inc(&nbuf_count);
340 }
341 qdf_export_symbol(__qdf_nbuf_count_inc);
342 
343 /**
344  * __qdf_nbuf_count_dec() - decrement nbuf global count
345  *
346  * @nbuf: sk buff
347  *
348  * Return: void
349  */
350 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
351 {
352 	qdf_atomic_dec(&nbuf_count);
353 }
354 qdf_export_symbol(__qdf_nbuf_count_dec);
355 #endif
356 
357 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
358 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
359 				 int align, int prio, const char *func,
360 				 uint32_t line)
361 {
362 	struct sk_buff *skb;
363 	unsigned long offset;
364 	uint32_t lowmem_alloc_tries = 0;
365 
366 	if (align)
367 		size += (align - 1);
368 
369 realloc:
370 	skb = dev_alloc_skb(size);
371 
372 	if (skb)
373 		goto skb_alloc;
374 
375 	skb = pld_nbuf_pre_alloc(size);
376 
377 	if (!skb) {
378 		qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
379 			     size, func, line);
380 		return NULL;
381 	}
382 
383 skb_alloc:
384 	/* Hawkeye M2M emulation cannot handle memory addresses below
385 	 * 0x50000040. Though we try to reserve low memory upfront to prevent
386 	 * this, we sometimes see SKBs allocated from low memory.
387 	 */
388 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
389 		lowmem_alloc_tries++;
390 		if (lowmem_alloc_tries > 100) {
391 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
392 				     size, func, line);
393 			return NULL;
394 		} else {
395 			/* Deliberately not freed, so the same
396 			 * low-memory buffer is not handed out again
397 			 */
398 			goto realloc;
399 		}
400 	}
401 	memset(skb->cb, 0x0, sizeof(skb->cb));
402 
403 	/*
404 	 * The default is for netbuf fragments to be interpreted
405 	 * as wordstreams rather than bytestreams.
406 	 */
407 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
408 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
409 
410 	/*
411 	 * XXX:how about we reserve first then align
412 	 * Align & make sure that the tail & data are adjusted properly
413 	 */
414 
415 	if (align) {
416 		offset = ((unsigned long)skb->data) % align;
417 		if (offset)
418 			skb_reserve(skb, align - offset);
419 	}
420 
421 	/*
422 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
423 	 * pointer
424 	 */
425 	skb_reserve(skb, reserve);
426 	qdf_nbuf_count_inc(skb);
427 
428 	return skb;
429 }
430 #else
431 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
432 				 int align, int prio, const char *func,
433 				 uint32_t line)
434 {
435 	struct sk_buff *skb;
436 	unsigned long offset;
437 	int flags = GFP_KERNEL;
438 
439 	if (align)
440 		size += (align - 1);
441 
442 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
443 		flags = GFP_ATOMIC;
444 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
445 		/*
446 		 * Observed that kcompactd burns out CPU to make order-3 pages.
447 		 * __netdev_alloc_skb has a 4k page fallback option in case
448 		 * high-order page allocation fails, so we don't need to push
449 		 * hard here. Let kcompactd rest in peace.
450 		 */
451 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
452 #endif
453 	}
454 
455 	skb = __netdev_alloc_skb(NULL, size, flags);
456 
457 	if (skb)
458 		goto skb_alloc;
459 
460 	skb = pld_nbuf_pre_alloc(size);
461 
462 	if (!skb) {
463 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
464 				size, func, line);
465 		__qdf_nbuf_start_replenish_timer();
466 		return NULL;
467 	} else {
468 		__qdf_nbuf_stop_replenish_timer();
469 	}
470 
471 skb_alloc:
472 	memset(skb->cb, 0x0, sizeof(skb->cb));
473 
474 	/*
475 	 * The default is for netbuf fragments to be interpreted
476 	 * as wordstreams rather than bytestreams.
477 	 */
478 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
479 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
480 
481 	/*
482 	 * XXX:how about we reserve first then align
483 	 * Align & make sure that the tail & data are adjusted properly
484 	 */
485 
486 	if (align) {
487 		offset = ((unsigned long)skb->data) % align;
488 		if (offset)
489 			skb_reserve(skb, align - offset);
490 	}
491 
492 	/*
493 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
494 	 * pointer
495 	 */
496 	skb_reserve(skb, reserve);
497 	qdf_nbuf_count_inc(skb);
498 
499 	return skb;
500 }
501 #endif
502 qdf_export_symbol(__qdf_nbuf_alloc);
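
/*
 * Worked example of the alignment logic above (illustrative numbers):
 * with align = 4 and skb->data ending in ...0x1002 after allocation,
 * offset = 0x1002 % 4 = 2, so skb_reserve(skb, 4 - 2) advances data to
 * ...0x1004. Padding the request with size += (align - 1) up front
 * guarantees the buffer is still large enough after that alignment
 * reserve.
 */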
503 
504 /**
505  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
506  * @skb: Pointer to network buffer
507  *
508  * Return: none
509  */
510 
511 #ifdef CONFIG_MCL
512 void __qdf_nbuf_free(struct sk_buff *skb)
513 {
514 	if (pld_nbuf_pre_alloc_free(skb))
515 		return;
516 
517 	qdf_nbuf_count_dec(skb);
518 	if (nbuf_free_cb)
519 		nbuf_free_cb(skb);
520 	else
521 		dev_kfree_skb_any(skb);
522 }
523 #else
524 void __qdf_nbuf_free(struct sk_buff *skb)
525 {
526 	if (pld_nbuf_pre_alloc_free(skb))
527 		return;
528 
529 	qdf_nbuf_count_dec(skb);
530 	dev_kfree_skb_any(skb);
531 }
532 #endif
533 
534 qdf_export_symbol(__qdf_nbuf_free);
535 
536 #ifdef NBUF_MEMORY_DEBUG
537 enum qdf_nbuf_event_type {
538 	QDF_NBUF_ALLOC,
539 	QDF_NBUF_FREE,
540 	QDF_NBUF_MAP,
541 	QDF_NBUF_UNMAP,
542 };
543 
544 struct qdf_nbuf_event {
545 	qdf_nbuf_t nbuf;
546 	char file[QDF_MEM_FILE_NAME_SIZE];
547 	uint32_t line;
548 	enum qdf_nbuf_event_type type;
549 	uint64_t timestamp;
550 };
551 
552 #define QDF_NBUF_HISTORY_SIZE 4096
553 static qdf_atomic_t qdf_nbuf_history_index;
554 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
555 
556 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
557 {
558 	int32_t next = qdf_atomic_inc_return(index);
559 
560 	if (next == size)
561 		qdf_atomic_sub(size, index);
562 
563 	return next % size;
564 }
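
/*
 * Worked example (QDF_NBUF_HISTORY_SIZE = 4096): when the shared index
 * sits at 4095, the next caller's qdf_atomic_inc_return() yields 4096;
 * the "next == size" branch subtracts 4096 so subsequent callers count
 * up from 0 again, while this caller's 4096 % 4096 = 0 selects slot 0.
 * Racing callers that grabbed 4097, 4098, ... still land in range
 * through the final modulo.
 */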
565 
566 static void
567 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
568 		     enum qdf_nbuf_event_type type)
569 {
570 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
571 						   QDF_NBUF_HISTORY_SIZE);
572 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
573 
574 	event->nbuf = nbuf;
575 	qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
576 	event->line = line;
577 	event->type = type;
578 	event->timestamp = qdf_get_log_timestamp();
579 }
580 #endif /* NBUF_MEMORY_DEBUG */
581 
582 #ifdef NBUF_MAP_UNMAP_DEBUG
583 struct qdf_nbuf_map_metadata {
584 	struct hlist_node node;
585 	qdf_nbuf_t nbuf;
586 	char file[QDF_MEM_FILE_NAME_SIZE];
587 	uint32_t line;
588 };
589 
590 DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
591 			 sizeof(struct qdf_nbuf_map_metadata), 0);
592 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
593 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
594 static qdf_spinlock_t qdf_nbuf_map_lock;
595 
596 static void qdf_nbuf_map_tracking_init(void)
597 {
598 	qdf_flex_mem_init(&qdf_nbuf_map_pool);
599 	hash_init(qdf_nbuf_map_ht);
600 	qdf_spinlock_create(&qdf_nbuf_map_lock);
601 }
602 
603 void qdf_nbuf_map_check_for_leaks(void)
604 {
605 	struct qdf_nbuf_map_metadata *meta;
606 	int bucket;
607 	uint32_t count = 0;
608 	bool is_empty;
609 
610 	qdf_flex_mem_release(&qdf_nbuf_map_pool);
611 
612 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
613 	is_empty = hash_empty(qdf_nbuf_map_ht);
614 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
615 
616 	if (is_empty)
617 		return;
618 
619 	qdf_err("Nbuf map without unmap events detected!");
620 	qdf_err("------------------------------------------------------------");
621 
622 	/* Hold the lock for the entire iteration for safe list/meta access. We
623 	 * are explicitly preferring the chance to watchdog on the print, over
624 	 * the possibility of invalid list/memory access. Since we are going to
625 	 * panic anyway, the worst case is loading up the crash dump to find out
626 	 * what was in the hash table.
627 	 */
628 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
629 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
630 		count++;
631 		qdf_err("0x%pK @ %s:%u",
632 			meta->nbuf, meta->file, meta->line);
633 	}
634 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
635 
636 	panic("%u fatal nbuf map without unmap events detected!", count);
637 }
638 
639 static void qdf_nbuf_map_tracking_deinit(void)
640 {
641 	qdf_nbuf_map_check_for_leaks();
642 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
643 	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
644 }
645 
646 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
647 {
648 	struct qdf_nbuf_map_metadata *meta;
649 
650 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
651 		if (meta->nbuf == nbuf)
652 			return meta;
653 	}
654 
655 	return NULL;
656 }
657 
658 static QDF_STATUS
659 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
660 {
661 	struct qdf_nbuf_map_metadata *meta;
662 
663 	QDF_BUG(nbuf);
664 	if (!nbuf) {
665 		qdf_err("Cannot map null nbuf");
666 		return QDF_STATUS_E_INVAL;
667 	}
668 
669 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
670 	meta = qdf_nbuf_meta_get(nbuf);
671 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
672 	if (meta)
673 		QDF_DEBUG_PANIC(
674 			"Double nbuf map detected @ %s:%u; last map from %s:%u",
675 			kbasename(file), line, meta->file, meta->line);
676 
677 	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
678 	if (!meta) {
679 		qdf_err("Failed to allocate nbuf map tracking metadata");
680 		return QDF_STATUS_E_NOMEM;
681 	}
682 
683 	meta->nbuf = nbuf;
684 	qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
685 	meta->line = line;
686 
687 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
688 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
689 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
690 
691 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
692 
693 	return QDF_STATUS_SUCCESS;
694 }
695 
696 static void
697 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
698 {
699 	struct qdf_nbuf_map_metadata *meta;
700 
701 	QDF_BUG(nbuf);
702 	if (!nbuf) {
703 		qdf_err("Cannot unmap null nbuf");
704 		return;
705 	}
706 
707 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
708 	meta = qdf_nbuf_meta_get(nbuf);
709 
710 	if (!meta)
711 		QDF_DEBUG_PANIC(
712 		      "Double nbuf unmap or unmap without map detected @ %s:%u",
713 		      kbasename(file), line);
714 
715 	hash_del(&meta->node);
716 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
717 
718 	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
719 
720 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
721 }
722 
723 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
724 			      qdf_nbuf_t buf,
725 			      qdf_dma_dir_t dir,
726 			      const char *file,
727 			      uint32_t line)
728 {
729 	QDF_STATUS status;
730 
731 	status = qdf_nbuf_track_map(buf, file, line);
732 	if (QDF_IS_STATUS_ERROR(status))
733 		return status;
734 
735 	status = __qdf_nbuf_map(osdev, buf, dir);
736 	if (QDF_IS_STATUS_ERROR(status))
737 		qdf_nbuf_untrack_map(buf, file, line);
738 
739 	return status;
740 }
741 
742 qdf_export_symbol(qdf_nbuf_map_debug);
743 
744 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
745 			  qdf_nbuf_t buf,
746 			  qdf_dma_dir_t dir,
747 			  const char *file,
748 			  uint32_t line)
749 {
750 	qdf_nbuf_untrack_map(buf, file, line);
751 	__qdf_nbuf_unmap_single(osdev, buf, dir);
752 }
753 
754 qdf_export_symbol(qdf_nbuf_unmap_debug);
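
/*
 * Illustrative pairing contract enforced by the tracker (hypothetical
 * call site; under NBUF_MAP_UNMAP_DEBUG the public qdf_nbuf_map() /
 * qdf_nbuf_unmap() wrappers resolve to the _debug variants above):
 * every successful map must be matched by exactly one unmap, otherwise
 * the double-map / leak checks fire.
 *
 *	if (QDF_IS_STATUS_ERROR(qdf_nbuf_map(osdev, nbuf,
 *					     QDF_DMA_TO_DEVICE)))
 *		return;
 *	...device DMA in progress...
 *	qdf_nbuf_unmap(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */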
755 
756 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
757 				     qdf_nbuf_t buf,
758 				     qdf_dma_dir_t dir,
759 				     const char *file,
760 				     uint32_t line)
761 {
762 	QDF_STATUS status;
763 
764 	status = qdf_nbuf_track_map(buf, file, line);
765 	if (QDF_IS_STATUS_ERROR(status))
766 		return status;
767 
768 	status = __qdf_nbuf_map_single(osdev, buf, dir);
769 	if (QDF_IS_STATUS_ERROR(status))
770 		qdf_nbuf_untrack_map(buf, file, line);
771 
772 	return status;
773 }
774 
775 qdf_export_symbol(qdf_nbuf_map_single_debug);
776 
777 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
778 				 qdf_nbuf_t buf,
779 				 qdf_dma_dir_t dir,
780 				 const char *file,
781 				 uint32_t line)
782 {
783 	qdf_nbuf_untrack_map(buf, file, line);
784 	__qdf_nbuf_unmap_single(osdev, buf, dir);
785 }
786 
787 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
788 
789 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
790 				     qdf_nbuf_t buf,
791 				     qdf_dma_dir_t dir,
792 				     int nbytes,
793 				     const char *file,
794 				     uint32_t line)
795 {
796 	QDF_STATUS status;
797 
798 	status = qdf_nbuf_track_map(buf, file, line);
799 	if (QDF_IS_STATUS_ERROR(status))
800 		return status;
801 
802 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
803 	if (QDF_IS_STATUS_ERROR(status))
804 		qdf_nbuf_untrack_map(buf, file, line);
805 
806 	return status;
807 }
808 
809 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
810 
811 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
812 				 qdf_nbuf_t buf,
813 				 qdf_dma_dir_t dir,
814 				 int nbytes,
815 				 const char *file,
816 				 uint32_t line)
817 {
818 	qdf_nbuf_untrack_map(buf, file, line);
819 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
820 }
821 
822 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
823 
824 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
825 					    qdf_nbuf_t buf,
826 					    qdf_dma_dir_t dir,
827 					    int nbytes,
828 					    const char *file,
829 					    uint32_t line)
830 {
831 	QDF_STATUS status;
832 
833 	status = qdf_nbuf_track_map(buf, file, line);
834 	if (QDF_IS_STATUS_ERROR(status))
835 		return status;
836 
837 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
838 	if (QDF_IS_STATUS_ERROR(status))
839 		qdf_nbuf_untrack_map(buf, file, line);
840 
841 	return status;
842 }
843 
844 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
845 
846 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
847 					qdf_nbuf_t buf,
848 					qdf_dma_dir_t dir,
849 					int nbytes,
850 					const char *file,
851 					uint32_t line)
852 {
853 	qdf_nbuf_untrack_map(buf, file, line);
854 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
855 }
856 
857 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
858 
859 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, const char *file,
860 					     uint32_t line)
861 {
862 	struct qdf_nbuf_map_metadata *meta;
863 
864 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
865 	meta = qdf_nbuf_meta_get(nbuf);
866 	if (meta)
867 		QDF_DEBUG_PANIC(
868 			"Nbuf freed @ %s:%u while mapped from %s:%u",
869 			kbasename(file), line, meta->file, meta->line);
870 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
871 }
872 #else
873 static inline void qdf_nbuf_map_tracking_init(void)
874 {
875 }
876 
877 static inline void qdf_nbuf_map_tracking_deinit(void)
878 {
879 }
880 
881 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
882 						    const char *file,
883 						    uint32_t line)
884 {
885 }
886 #endif /* NBUF_MAP_UNMAP_DEBUG */
887 
888 /**
889  * __qdf_nbuf_map() - map a buffer to local bus address space
890  * @osdev: OS device
892  * @skb: Pointer to network buffer
893  * @dir: Direction
894  *
895  * Return: QDF_STATUS
896  */
897 #ifdef QDF_OS_DEBUG
898 QDF_STATUS
899 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
900 {
901 	struct skb_shared_info *sh = skb_shinfo(skb);
902 
903 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
904 			|| (dir == QDF_DMA_FROM_DEVICE));
905 
906 	/*
907 	 * Assume there's only a single fragment.
908 	 * To support multiple fragments, it would be necessary to change
909 	 * qdf_nbuf_t to be a separate object that stores meta-info
910 	 * (including the bus address for each fragment) and a pointer
911 	 * to the underlying sk_buff.
912 	 */
913 	qdf_assert(sh->nr_frags == 0);
914 
915 	return __qdf_nbuf_map_single(osdev, skb, dir);
916 }
917 qdf_export_symbol(__qdf_nbuf_map);
918 
919 #else
920 QDF_STATUS
921 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
922 {
923 	return __qdf_nbuf_map_single(osdev, skb, dir);
924 }
925 qdf_export_symbol(__qdf_nbuf_map);
926 #endif
927 /**
928  * __qdf_nbuf_unmap() - unmap a previously mapped buffer
929  * @osdev: OS device
930  * @skb: Pointer to network buffer
931  * @dir: dma direction
932  *
933  * Return: none
934  */
935 void
936 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
937 			qdf_dma_dir_t dir)
938 {
939 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
940 		   || (dir == QDF_DMA_FROM_DEVICE));
941 
942 	/*
943 	 * Assume there's a single fragment.
944 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
945 	 */
946 	__qdf_nbuf_unmap_single(osdev, skb, dir);
947 }
948 qdf_export_symbol(__qdf_nbuf_unmap);
949 
950 /**
951  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
952  * @osdev: OS device
953  * @skb: Pointer to network buffer
954  * @dir: Direction
955  *
956  * Return: QDF_STATUS
957  */
958 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
959 QDF_STATUS
960 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
961 {
962 	qdf_dma_addr_t paddr;
963 
964 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
965 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
966 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
967 	return QDF_STATUS_SUCCESS;
968 }
969 qdf_export_symbol(__qdf_nbuf_map_single);
970 #else
971 QDF_STATUS
972 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
973 {
974 	qdf_dma_addr_t paddr;
975 
976 	/* assume that the OS only provides a single fragment */
977 	QDF_NBUF_CB_PADDR(buf) = paddr =
978 		dma_map_single(osdev->dev, buf->data,
979 				skb_end_pointer(buf) - buf->data,
980 				__qdf_dma_dir_to_os(dir));
981 	return dma_mapping_error(osdev->dev, paddr)
982 		? QDF_STATUS_E_FAILURE
983 		: QDF_STATUS_SUCCESS;
984 }
985 qdf_export_symbol(__qdf_nbuf_map_single);
986 #endif
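
/*
 * Note on the mapped length used above: dma_map_single() covers the
 * whole linear buffer, skb_end_pointer(buf) - buf->data, not just the
 * bytes in use. Illustrative example: a 2 KB skb carrying a 100-byte
 * frame still has its full linear region mapped, and the unmap below
 * recomputes the identical length so the two calls always match.
 */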
987 /**
988  * __qdf_nbuf_unmap_single() - unmap a previously mapped buffer
989  * @osdev: OS device
990  * @skb: Pointer to network buffer
991  * @dir: Direction
992  *
993  * Return: none
994  */
995 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
996 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
997 				qdf_dma_dir_t dir)
998 {
999 }
1000 #else
1001 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1002 					qdf_dma_dir_t dir)
1003 {
1004 	if (QDF_NBUF_CB_PADDR(buf))
1005 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1006 			skb_end_pointer(buf) - buf->data,
1007 			__qdf_dma_dir_to_os(dir));
1008 }
1009 #endif
1010 qdf_export_symbol(__qdf_nbuf_unmap_single);
1011 
1012 /**
1013  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1014  * @skb: Pointer to network buffer
1015  * @cksum: Pointer to checksum value
1016  *
1017  * Return: QDF_STATUS
1018  */
1019 QDF_STATUS
1020 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1021 {
1022 	switch (cksum->l4_result) {
1023 	case QDF_NBUF_RX_CKSUM_NONE:
1024 		skb->ip_summed = CHECKSUM_NONE;
1025 		break;
1026 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1027 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1028 		break;
1029 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1030 		skb->ip_summed = CHECKSUM_PARTIAL;
1031 		skb->csum = cksum->val;
1032 		break;
1033 	default:
1034 		pr_err("Unknown checksum type\n");
1035 		qdf_assert(0);
1036 		return QDF_STATUS_E_NOSUPPORT;
1037 	}
1038 	return QDF_STATUS_SUCCESS;
1039 }
1040 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1041 
1042 /**
1043  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1044  * @skb: Pointer to network buffer
1045  *
1046  * Return: TX checksum value
1047  */
1048 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1049 {
1050 	switch (skb->ip_summed) {
1051 	case CHECKSUM_NONE:
1052 		return QDF_NBUF_TX_CKSUM_NONE;
1053 	case CHECKSUM_PARTIAL:
1054 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1055 	case CHECKSUM_COMPLETE:
1056 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1057 	default:
1058 		return QDF_NBUF_TX_CKSUM_NONE;
1059 	}
1060 }
1061 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1062 
1063 /**
1064  * __qdf_nbuf_get_tid() - get tid
1065  * @skb: Pointer to network buffer
1066  *
1067  * Return: tid
1068  */
1069 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1070 {
1071 	return skb->priority;
1072 }
1073 qdf_export_symbol(__qdf_nbuf_get_tid);
1074 
1075 /**
1076  * __qdf_nbuf_set_tid() - set tid
1077  * @skb: Pointer to network buffer
1078  * @tid: TID value to set
1079  * Return: none
1080  */
1081 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1082 {
1083 	skb->priority = tid;
1084 }
1085 qdf_export_symbol(__qdf_nbuf_set_tid);
1086 
1087 /**
1088  * __qdf_nbuf_get_exemption_type() - get exemption type
1089  * @skb: Pointer to network buffer
1090  *
1091  * Return: exemption type
1092  */
1093 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1094 {
1095 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1096 }
1097 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1098 
1099 /**
1100  * __qdf_nbuf_reg_trace_cb() - register trace callback
1101  * @cb_func_ptr: Pointer to trace callback function
1102  *
1103  * Return: none
1104  */
1105 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1106 {
1107 	qdf_trace_update_cb = cb_func_ptr;
1108 }
1109 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1110 
1111 /**
1112  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1113  *              of DHCP packet.
1114  * @data: Pointer to DHCP packet data buffer
1115  *
1116  * This func. returns the subtype of DHCP packet.
1117  *
1118  * Return: subtype of the DHCP packet.
1119  */
1120 enum qdf_proto_subtype
1121 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1122 {
1123 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1124 
1125 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1126 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1127 					QDF_DHCP_OPTION53_LENGTH)) {
1128 
1129 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1130 		case QDF_DHCP_DISCOVER:
1131 			subtype = QDF_PROTO_DHCP_DISCOVER;
1132 			break;
1133 		case QDF_DHCP_REQUEST:
1134 			subtype = QDF_PROTO_DHCP_REQUEST;
1135 			break;
1136 		case QDF_DHCP_OFFER:
1137 			subtype = QDF_PROTO_DHCP_OFFER;
1138 			break;
1139 		case QDF_DHCP_ACK:
1140 			subtype = QDF_PROTO_DHCP_ACK;
1141 			break;
1142 		case QDF_DHCP_NAK:
1143 			subtype = QDF_PROTO_DHCP_NACK;
1144 			break;
1145 		case QDF_DHCP_RELEASE:
1146 			subtype = QDF_PROTO_DHCP_RELEASE;
1147 			break;
1148 		case QDF_DHCP_INFORM:
1149 			subtype = QDF_PROTO_DHCP_INFORM;
1150 			break;
1151 		case QDF_DHCP_DECLINE:
1152 			subtype = QDF_PROTO_DHCP_DECLINE;
1153 			break;
1154 		default:
1155 			break;
1156 		}
1157 	}
1158 
1159 	return subtype;
1160 }
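
/*
 * Byte-level example for the option-53 parse above (standard DHCP
 * encoding; the offsets come from the QDF_DHCP_OPTION53_* constants):
 * a DHCP Discover carries the option tuple {0x35, 0x01, 0x01}, i.e.
 * {option 53, length 1, DHCPDISCOVER}, so the status byte reads 0x01
 * and the switch returns QDF_PROTO_DHCP_DISCOVER.
 */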
1161 
1162 /**
1163  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1164  *            of EAPOL packet.
1165  * @data: Pointer to EAPOL packet data buffer
1166  *
1167  * This func. returns the subtype of EAPOL packet.
1168  *
1169  * Return: subtype of the EAPOL packet.
1170  */
1171 enum qdf_proto_subtype
1172 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1173 {
1174 	uint16_t eapol_key_info;
1175 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1176 	uint16_t mask;
1177 
1178 	eapol_key_info = (uint16_t)(*(uint16_t *)
1179 			(data + EAPOL_KEY_INFO_OFFSET));
1180 
1181 	mask = eapol_key_info & EAPOL_MASK;
1182 	switch (mask) {
1183 	case EAPOL_M1_BIT_MASK:
1184 		subtype = QDF_PROTO_EAPOL_M1;
1185 		break;
1186 	case EAPOL_M2_BIT_MASK:
1187 		subtype = QDF_PROTO_EAPOL_M2;
1188 		break;
1189 	case EAPOL_M3_BIT_MASK:
1190 		subtype = QDF_PROTO_EAPOL_M3;
1191 		break;
1192 	case EAPOL_M4_BIT_MASK:
1193 		subtype = QDF_PROTO_EAPOL_M4;
1194 		break;
1195 	default:
1196 		break;
1197 	}
1198 
1199 	return subtype;
1200 }
1201 
1202 /**
1203  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1204  *            of ARP packet.
1205  * @data: Pointer to ARP packet data buffer
1206  *
1207  * This func. returns the subtype of ARP packet.
1208  *
1209  * Return: subtype of the ARP packet.
1210  */
1211 enum qdf_proto_subtype
1212 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1213 {
1214 	uint16_t subtype;
1215 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1216 
1217 	subtype = (uint16_t)(*(uint16_t *)
1218 			(data + ARP_SUB_TYPE_OFFSET));
1219 
1220 	switch (QDF_SWAP_U16(subtype)) {
1221 	case ARP_REQUEST:
1222 		proto_subtype = QDF_PROTO_ARP_REQ;
1223 		break;
1224 	case ARP_RESPONSE:
1225 		proto_subtype = QDF_PROTO_ARP_RES;
1226 		break;
1227 	default:
1228 		break;
1229 	}
1230 
1231 	return proto_subtype;
1232 }
1233 
1234 /**
1235  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1236  *            of IPV4 ICMP packet.
1237  * @data: Pointer to IPV4 ICMP packet data buffer
1238  *
1239  * This func. returns the subtype of ICMP packet.
1240  *
1241  * Return: subtype of the ICMP packet.
1242  */
1243 enum qdf_proto_subtype
1244 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1245 {
1246 	uint8_t subtype;
1247 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1248 
1249 	subtype = (uint8_t)(*(uint8_t *)
1250 			(data + ICMP_SUBTYPE_OFFSET));
1251 
1252 	switch (subtype) {
1253 	case ICMP_REQUEST:
1254 		proto_subtype = QDF_PROTO_ICMP_REQ;
1255 		break;
1256 	case ICMP_RESPONSE:
1257 		proto_subtype = QDF_PROTO_ICMP_RES;
1258 		break;
1259 	default:
1260 		break;
1261 	}
1262 
1263 	return proto_subtype;
1264 }
1265 
1266 /**
1267  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1268  *            of IPV6 ICMPV6 packet.
1269  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1270  *
1271  * This func. returns the subtype of ICMPV6 packet.
1272  *
1273  * Return: subtype of the ICMPV6 packet.
1274  */
1275 enum qdf_proto_subtype
1276 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1277 {
1278 	uint8_t subtype;
1279 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1280 
1281 	subtype = (uint8_t)(*(uint8_t *)
1282 			(data + ICMPV6_SUBTYPE_OFFSET));
1283 
1284 	switch (subtype) {
1285 	case ICMPV6_REQUEST:
1286 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1287 		break;
1288 	case ICMPV6_RESPONSE:
1289 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1290 		break;
1291 	case ICMPV6_RS:
1292 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1293 		break;
1294 	case ICMPV6_RA:
1295 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1296 		break;
1297 	case ICMPV6_NS:
1298 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1299 		break;
1300 	case ICMPV6_NA:
1301 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1302 		break;
1303 	default:
1304 		break;
1305 	}
1306 
1307 	return proto_subtype;
1308 }
1309 
1310 /**
1311  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1312  *            of IPV4 packet.
1313  * @data: Pointer to IPV4 packet data buffer
1314  *
1315  * This func. returns the proto type of IPV4 packet.
1316  *
1317  * Return: proto type of IPV4 packet.
1318  */
1319 uint8_t
1320 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1321 {
1322 	uint8_t proto_type;
1323 
1324 	proto_type = (uint8_t)(*(uint8_t *)(data +
1325 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1326 	return proto_type;
1327 }
1328 
1329 /**
1330  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1331  *            of IPV6 packet.
1332  * @data: Pointer to IPV6 packet data buffer
1333  *
1334  * This func. returns the proto type of IPV6 packet.
1335  *
1336  * Return: proto type of IPV6 packet.
1337  */
1338 uint8_t
1339 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1340 {
1341 	uint8_t proto_type;
1342 
1343 	proto_type = (uint8_t)(*(uint8_t *)(data +
1344 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1345 	return proto_type;
1346 }
1347 
1348 /**
1349  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1350  * @data: Pointer to network data
1351  *
1352  * This api is for Tx packets.
1353  *
1354  * Return: true if packet is an ipv4 packet
1355  *	   false otherwise
1356  */
1357 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1358 {
1359 	uint16_t ether_type;
1360 
1361 	ether_type = (uint16_t)(*(uint16_t *)(data +
1362 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1363 
1364 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1365 		return true;
1366 	else
1367 		return false;
1368 }
1369 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1370 
1371 /**
1372  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1373  * @data: Pointer to network data buffer
1374  *
1375  * This api is for ipv4 packet.
1376  *
1377  * Return: true if packet is DHCP packet
1378  *	   false otherwise
1379  */
1380 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1381 {
1382 	uint16_t sport;
1383 	uint16_t dport;
1384 
1385 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1386 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1387 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1388 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1389 					 sizeof(uint16_t)));
1390 
1391 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1392 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1393 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1394 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1395 		return true;
1396 	else
1397 		return false;
1398 }
1399 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
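
/*
 * Worked example for the port test above: DHCP uses UDP port 67
 * (server) and 68 (client), carried big-endian on the wire.
 * QDF_SWAP_U16() byte-swaps the host-order constant so it can be
 * compared directly against the raw on-wire halfword: 67 = 0x0043
 * becomes 0x4300, which is exactly what a little-endian load of the
 * wire bytes 0x00 0x43 produces.
 */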
1400 
1401 /**
1402  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1403  * @data: Pointer to network data buffer
1404  *
1405  * This api is for ipv4 packet.
1406  *
1407  * Return: true if packet is EAPOL packet
1408  *	   false otherwise.
1409  */
1410 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1411 {
1412 	uint16_t ether_type;
1413 
1414 	ether_type = (uint16_t)(*(uint16_t *)(data +
1415 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1416 
1417 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1418 		return true;
1419 	else
1420 		return false;
1421 }
1422 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1423 
1424 /**
1425  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1426  * @skb: Pointer to network buffer
1427  *
1428  * This api is for ipv4 packet.
1429  *
1430  * Return: true if packet is WAPI packet
1431  *	   false otherwise.
1432  */
1433 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1434 {
1435 	uint16_t ether_type;
1436 
1437 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1438 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1439 
1440 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1441 		return true;
1442 	else
1443 		return false;
1444 }
1445 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1446 
1447 /**
1448  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1449  * @skb: Pointer to network buffer
1450  *
1451  * This api is for ipv4 packet.
1452  *
1453  * Return: true if packet is tdls packet
1454  *	   false otherwise.
1455  */
1456 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1457 {
1458 	uint16_t ether_type;
1459 
1460 	ether_type = *(uint16_t *)(skb->data +
1461 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1462 
1463 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1464 		return true;
1465 	else
1466 		return false;
1467 }
1468 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1469 
1470 /**
1471  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1472  * @data: Pointer to network data buffer
1473  *
1474  * This api is for ipv4 packet.
1475  *
1476  * Return: true if packet is ARP packet
1477  *	   false otherwise.
1478  */
1479 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1480 {
1481 	uint16_t ether_type;
1482 
1483 	ether_type = (uint16_t)(*(uint16_t *)(data +
1484 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1485 
1486 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1487 		return true;
1488 	else
1489 		return false;
1490 }
1491 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1492 
1493 /**
1494  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1495  * @data: Pointer to network data buffer
1496  *
1497  * This api is for ipv4 packet.
1498  *
1499  * Return: true if packet is ARP request
1500  *	   false otherwise.
1501  */
1502 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1503 {
1504 	uint16_t op_code;
1505 
1506 	op_code = (uint16_t)(*(uint16_t *)(data +
1507 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1508 
1509 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1510 		return true;
1511 	return false;
1512 }
1513 
1514 /**
1515  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1516  * @data: Pointer to network data buffer
1517  *
1518  * This api is for ipv4 packet.
1519  *
1520  * Return: true if packet is ARP response
1521  *	   false otherwise.
1522  */
1523 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1524 {
1525 	uint16_t op_code;
1526 
1527 	op_code = (uint16_t)(*(uint16_t *)(data +
1528 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1529 
1530 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1531 		return true;
1532 	return false;
1533 }
1534 
1535 /**
1536  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1537  * @data: Pointer to network data buffer
1538  *
1539  * This api is for ipv4 packet.
1540  *
1541  * Return: ARP packet source IP value.
1542  */
1543 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1544 {
1545 	uint32_t src_ip;
1546 
1547 	src_ip = (uint32_t)(*(uint32_t *)(data +
1548 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1549 
1550 	return src_ip;
1551 }
1552 
1553 /**
1554  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1555  * @data: Pointer to network data buffer
1556  *
1557  * This api is for ipv4 packet.
1558  *
1559  * Return: ARP packet target IP value.
1560  */
1561 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1562 {
1563 	uint32_t tgt_ip;
1564 
1565 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1566 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1567 
1568 	return tgt_ip;
1569 }
1570 
1571 /**
1572  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1573  * @data: Pointer to network data buffer
1574  * @len: length to copy (currently unused)
1575  *
1576  * This api is for dns domain name
1577  *
1578  * Return: dns domain name.
1579  */
1580 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1581 {
1582 	uint8_t *domain_name;
1583 
1584 	domain_name = (uint8_t *)
1585 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1586 	return domain_name;
1587 }
1588 
1589 
1590 /**
1591  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1592  * @data: Pointer to network data buffer
1593  *
1594  * This api is for dns query packet.
1595  *
1596  * Return: true if packet is dns query packet.
1597  *	   false otherwise.
1598  */
1599 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1600 {
1601 	uint16_t op_code;
1602 	uint16_t tgt_port;
1603 
1604 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1605 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1606 	/* Standard DNS queries always happen on Dest Port 53. */
1607 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1608 		op_code = (uint16_t)(*(uint16_t *)(data +
1609 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1610 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1611 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1612 			return true;
1613 	}
1614 	return false;
1615 }
1616 
1617 /**
1618  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1619  * @data: Pointer to network data buffer
1620  *
1621  * This api is for dns query response.
1622  *
1623  * Return: true if packet is dns response packet.
1624  *	   false otherwise.
1625  */
1626 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1627 {
1628 	uint16_t op_code;
1629 	uint16_t src_port;
1630 
1631 	src_port = (uint16_t)(*(uint16_t *)(data +
1632 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1633 	/* Standard DNS responses always come from Src Port 53. */
1634 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1635 		op_code = (uint16_t)(*(uint16_t *)(data +
1636 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1637 
1638 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1639 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1640 			return true;
1641 	}
1642 	return false;
1643 }
1644 
1645 /**
1646  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1647  * @data: Pointer to network data buffer
1648  *
1649  * This api is for tcp syn packet.
1650  *
1651  * Return: true if packet is tcp syn packet.
1652  *	   false otherwise.
1653  */
1654 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1655 {
1656 	uint8_t op_code;
1657 
1658 	op_code = (uint8_t)(*(uint8_t *)(data +
1659 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1660 
1661 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1662 		return true;
1663 	return false;
1664 }
1665 
1666 /**
1667  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1668  * @data: Pointer to network data buffer
1669  *
1670  * This api is for tcp syn ack packet.
1671  *
1672  * Return: true if packet is tcp syn ack packet.
1673  *	   false otherwise.
1674  */
1675 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1676 {
1677 	uint8_t op_code;
1678 
1679 	op_code = (uint8_t)(*(uint8_t *)(data +
1680 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1681 
1682 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1683 		return true;
1684 	return false;
1685 }
1686 
1687 /**
1688  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1689  * @data: Pointer to network data buffer
1690  *
1691  * This api is for tcp ack packet.
1692  *
1693  * Return: true if packet is tcp ack packet.
1694  *	   false otherwise.
1695  */
1696 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1697 {
1698 	uint8_t op_code;
1699 
1700 	op_code = (uint8_t)(*(uint8_t *)(data +
1701 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1702 
1703 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1704 		return true;
1705 	return false;
1706 }
1707 
1708 /**
1709  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1710  * @data: Pointer to network data buffer
1711  *
1712  * This api is for tcp packet.
1713  *
1714  * Return: tcp source port value.
1715  */
1716 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1717 {
1718 	uint16_t src_port;
1719 
1720 	src_port = (uint16_t)(*(uint16_t *)(data +
1721 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1722 
1723 	return src_port;
1724 }
1725 
1726 /**
1727  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1728  * @data: Pointer to network data buffer
1729  *
1730  * This api is for tcp packet.
1731  *
1732  * Return: tcp destination port value.
1733  */
1734 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1735 {
1736 	uint16_t tgt_port;
1737 
1738 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1739 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1740 
1741 	return tgt_port;
1742 }
1743 
1744 /**
1745  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1746  * @data: Pointer to network data buffer
1747  *
1748  * This api is for icmpv4 request packets.
1749  *
1750  * Return: true if packet is icmpv4 request
1751  *	   false otherwise.
1752  */
1753 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1754 {
1755 	uint8_t op_code;
1756 
1757 	op_code = (uint8_t)(*(uint8_t *)(data +
1758 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1759 
1760 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1761 		return true;
1762 	return false;
1763 }
1764 
1765 /**
1766  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1767  * @data: Pointer to network data buffer
1768  *
1769  * This api is for icmpv4 response packets.
1770  *
1771  * Return: true if packet is icmpv4 response
1772  *	   false otherwise.
1773  */
1774 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1775 {
1776 	uint8_t op_code;
1777 
1778 	op_code = (uint8_t)(*(uint8_t *)(data +
1779 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1780 
1781 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1782 		return true;
1783 	return false;
1784 }
1785 
1786 /**
1787  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1788  * @data: Pointer to network data buffer
1789  *
1790  * This api is for ipv4 packet.
1791  *
1792  * Return: icmpv4 packet source IP value.
1793  */
1794 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1795 {
1796 	uint32_t src_ip;
1797 
1798 	src_ip = (uint32_t)(*(uint32_t *)(data +
1799 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1800 
1801 	return src_ip;
1802 }
1803 
1804 /**
1805  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1806  * @data: Pointer to network data buffer
1807  *
1808  * This api is for ipv4 packet.
1809  *
1810  * Return: icmpv4 packet target IP value.
1811  */
1812 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1813 {
1814 	uint32_t tgt_ip;
1815 
1816 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1817 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1818 
1819 	return tgt_ip;
1820 }
1821 
1822 
1823 /**
1824  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1825  * @data: Pointer to IPV6 packet data buffer
1826  *
1827  * This func. checks whether it is an IPV6 packet or not.
1828  *
1829  * Return: TRUE if it is an IPV6 packet
1830  *         FALSE if not
1831  */
1832 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1833 {
1834 	uint16_t ether_type;
1835 
1836 	ether_type = (uint16_t)(*(uint16_t *)(data +
1837 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1838 
1839 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1840 		return true;
1841 	else
1842 		return false;
1843 }
1844 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1845 
1846 /**
1847  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1848  * @data: Pointer to network data buffer
1849  *
1850  * This api is for ipv6 packet.
1851  *
1852  * Return: true if packet is DHCP packet
1853  *	   false otherwise
1854  */
1855 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1856 {
1857 	uint16_t sport;
1858 	uint16_t dport;
1859 
1860 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1861 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1862 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1863 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1864 					sizeof(uint16_t));
1865 
1866 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1867 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1868 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1869 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1870 		return true;
1871 	else
1872 		return false;
1873 }
1874 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1875 
1876 /**
1877  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
1878  * @data: Pointer to IPV4 packet data buffer
1879  *
1880  * This func. checks whether it is an IPV4 multicast packet or not.
1881  *
1882  * Return: TRUE if it is an IPV4 multicast packet
1883  *         FALSE if not
1884  */
1885 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1886 {
1887 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1888 		uint32_t *dst_addr =
1889 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1890 
1891 		/*
1892 		 * Check the most significant nibble of the IPV4 destination
1893 		 * address; 0xE (224.0.0.0/4) indicates a multicast address.
1894 		 */
1895 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1896 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1897 			return true;
1898 		else
1899 			return false;
1900 	} else
1901 		return false;
1902 }
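
/*
 * Worked example for the multicast test above (mask semantics assumed
 * from the macro names): destination 224.0.0.1 starts with byte 0xE0,
 * so the masked leading nibble is 0xE, the IPv4 multicast block
 * 224.0.0.0/4, and the check returns true; 192.168.1.1 starts with
 * 0xC0 and is reported as non-multicast.
 */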
1903 
1904 /**
1905  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
1906  * @data: Pointer to IPV6 packet data buffer
1907  *
1908  * This func. checks whether it is an IPV6 multicast packet or not.
1909  *
1910  * Return: TRUE if it is an IPV6 multicast packet
1911  *         FALSE if not
1912  */
1913 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1914 {
1915 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1916 		uint16_t *dst_addr;
1917 
1918 		dst_addr = (uint16_t *)
1919 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1920 
1921 		/*
1922 		 * Check the first 16 bits of the destination address;
1923 		 * if they are 0xFF00 it is an IPV6 mcast packet.
1924 		 */
1925 		if (*dst_addr ==
1926 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1927 			return true;
1928 		else
1929 			return false;
1930 	} else
1931 		return false;
1932 }
1933 
1934 /**
1935  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
1936  * @data: Pointer to IPV4 ICMP packet data buffer
1937  *
1938  * This func. checks whether it is an ICMP packet or not.
1939  *
1940  * Return: TRUE if it is an ICMP packet
1941  *         FALSE if not
1942  */
1943 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1944 {
1945 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1946 		uint8_t pkt_type;
1947 
1948 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1949 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1950 
1951 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1952 			return true;
1953 		else
1954 			return false;
1955 	} else
1956 		return false;
1957 }
1958 
1959 /**
1960  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
1961  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1962  *
1963  * This func. checks whether it is an ICMPV6 packet or not.
1964  *
1965  * Return: TRUE if it is an ICMPV6 packet
1966  *         FALSE if not
1967  */
1968 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1969 {
1970 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1971 		uint8_t pkt_type;
1972 
1973 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1974 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1975 
1976 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1977 			return true;
1978 		else
1979 			return false;
1980 	} else
1981 		return false;
1982 }
1983 
1984 /**
1985  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
1986  * @data: Pointer to IPV4 UDP packet data buffer
1987  *
1988  * This func. checks whether it is an IPV4 UDP packet or not.
1989  *
1990  * Return: TRUE if it is an IPV4 UDP packet
1991  *         FALSE if not
1992  */
1993 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1994 {
1995 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1996 		uint8_t pkt_type;
1997 
1998 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1999 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2000 
2001 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2002 			return true;
2003 		else
2004 			return false;
2005 	} else
2006 		return false;
2007 }
2008 
2009 /**
2010  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2011  * @data: Pointer to IPV4 TCP packet data buffer
2012  *
2013  * This function checks whether it is an IPV4 TCP packet or not.
2014  *
2015  * Return: TRUE if it is an IPV4 TCP packet
2016  *         FALSE if not
2017  */
2018 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2019 {
2020 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2021 		uint8_t pkt_type;
2022 
2023 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2024 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2025 
2026 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2027 			return true;
2028 		else
2029 			return false;
2030 	} else
2031 		return false;
2032 }
2033 
2034 /**
2035  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2036  * @data: Pointer to IPV6 UDP packet data buffer
2037  *
2038  * This function checks whether it is an IPV6 UDP packet or not.
2039  *
2040  * Return: TRUE if it is an IPV6 UDP packet
2041  *         FALSE if not
2042  */
2043 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2044 {
2045 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2046 		uint8_t pkt_type;
2047 
2048 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2049 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2050 
2051 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2052 			return true;
2053 		else
2054 			return false;
2055 	} else
2056 		return false;
2057 }
2058 
2059 /**
2060  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2061  * @data: Pointer to IPV6 TCP packet data buffer
2062  *
2063  * This function checks whether it is an IPV6 TCP packet or not.
2064  *
2065  * Return: TRUE if it is an IPV6 TCP packet
2066  *         FALSE if not
2067  */
2068 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2069 {
2070 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2071 		uint8_t pkt_type;
2072 
2073 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2074 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2075 
2076 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2077 			return true;
2078 		else
2079 			return false;
2080 	} else
2081 		return false;
2082 }
2083 
2084 /**
2085  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2086  * @nbuf: sk buff
2087  *
2088  * Return: true if packet is broadcast
2089  *	   false otherwise
2090  */
2091 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2092 {
2093 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2094 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2095 }
2096 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2097 
2098 #ifdef NBUF_MEMORY_DEBUG
2099 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2100 
2101 /**
2102  * struct qdf_nbuf_track_t - Network buffer track structure
2103  *
2104  * @p_next: Pointer to next
2105  * @net_buf: Pointer to network buffer
2106  * @file_name: File name
2107  * @line_num: Line number
2108  * @size: Size
2109  */
2110 struct qdf_nbuf_track_t {
2111 	struct qdf_nbuf_track_t *p_next;
2112 	qdf_nbuf_t net_buf;
2113 	char file_name[QDF_MEM_FILE_NAME_SIZE];
2114 	uint32_t line_num;
2115 	size_t size;
2116 };
2117 
2118 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2119 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2120 
2121 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2122 static struct kmem_cache *nbuf_tracking_cache;
2123 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2124 static spinlock_t qdf_net_buf_track_free_list_lock;
2125 static uint32_t qdf_net_buf_track_free_list_count;
2126 static uint32_t qdf_net_buf_track_used_list_count;
2127 static uint32_t qdf_net_buf_track_max_used;
2128 static uint32_t qdf_net_buf_track_max_free;
2129 static uint32_t qdf_net_buf_track_max_allocated;
2130 
2131 /**
2132  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2133  *
2134  * tracks the max number of network buffers that the wlan driver was tracking
2135  * at any one time.
2136  *
2137  * Return: none
2138  */
2139 static inline void update_max_used(void)
2140 {
2141 	int sum;
2142 
2143 	if (qdf_net_buf_track_max_used <
2144 	    qdf_net_buf_track_used_list_count)
2145 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2146 	sum = qdf_net_buf_track_free_list_count +
2147 		qdf_net_buf_track_used_list_count;
2148 	if (qdf_net_buf_track_max_allocated < sum)
2149 		qdf_net_buf_track_max_allocated = sum;
2150 }
2151 
2152 /**
2153  * update_max_free() - update qdf_net_buf_track_max_free
2154  *
2155  * tracks the max number of tracking buffers kept in the freelist.
2156  *
2157  * Return: none
2158  */
2159 static inline void update_max_free(void)
2160 {
2161 	if (qdf_net_buf_track_max_free <
2162 	    qdf_net_buf_track_free_list_count)
2163 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2164 }
2165 
2166 /**
2167  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2168  *
2169  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2170  * This function also ads fexibility to adjust the allocation and freelist
2171  * scheems.
2172  *
2173  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2174  */
2175 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2176 {
2177 	int flags = GFP_KERNEL;
2178 	unsigned long irq_flag;
2179 	QDF_NBUF_TRACK *new_node = NULL;
2180 
2181 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2182 	qdf_net_buf_track_used_list_count++;
2183 	if (qdf_net_buf_track_free_list != NULL) {
2184 		new_node = qdf_net_buf_track_free_list;
2185 		qdf_net_buf_track_free_list =
2186 			qdf_net_buf_track_free_list->p_next;
2187 		qdf_net_buf_track_free_list_count--;
2188 	}
2189 	update_max_used();
2190 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2191 
2192 	if (new_node != NULL)
2193 		return new_node;
2194 
2195 	if (in_interrupt() || irqs_disabled() || in_atomic())
2196 		flags = GFP_ATOMIC;
2197 
2198 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2199 }
2200 
2201 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2202 #define FREEQ_POOLSIZE 2048
2203 
2204 /**
2205  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2206  *
2207  * Matches calls to qdf_nbuf_track_alloc.
2208  * Either frees the tracking cookie back to the kernel or to an internal
2209  * freelist, based on the size of the freelist.
2210  *
2211  * Return: none
2212  */
2213 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2214 {
2215 	unsigned long irq_flag;
2216 
2217 	if (!node)
2218 		return;
2219 
2220 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
2221 	 * only shrink the freelist if it is bigger than twice the number of
2222 	 * nbufs in use. If the driver is stalling in a consistently bursty
2223 	 * fashion, this will keep 3/4 of the allocations coming from the free
2224 	 * list while still allowing the system to recover memory as less
2225 	 * frantic traffic occurs.
2226 	 */
2227 
2228 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2229 
2230 	qdf_net_buf_track_used_list_count--;
2231 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2232 	   (qdf_net_buf_track_free_list_count >
2233 	    qdf_net_buf_track_used_list_count << 1)) {
2234 		kmem_cache_free(nbuf_tracking_cache, node);
2235 	} else {
2236 		node->p_next = qdf_net_buf_track_free_list;
2237 		qdf_net_buf_track_free_list = node;
2238 		qdf_net_buf_track_free_list_count++;
2239 	}
2240 	update_max_free();
2241 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2242 }
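
/*
 * Worked example of the shrink heuristic above (illustrative numbers):
 * with FREEQ_POOLSIZE = 2048, a freelist of 3000 nodes and 1000 nbufs
 * in use, 3000 > 2048 and 3000 > (1000 << 1), so the node is returned
 * to the kmem_cache. With 1600 nbufs in use, 3000 <= (1600 << 1), so
 * the node is pushed back onto the freelist instead.
 */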
2243 
2244 /**
2245  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2246  *
2247  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2248  * the freelist first makes it performant for the first iperf udp burst
2249  * as well as steady state.
2250  *
2251  * Return: None
2252  */
2253 static void qdf_nbuf_track_prefill(void)
2254 {
2255 	int i;
2256 	QDF_NBUF_TRACK *node, *head;
2257 
2258 	/* prepopulate the freelist */
2259 	head = NULL;
2260 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2261 		node = qdf_nbuf_track_alloc();
2262 		if (node == NULL)
2263 			continue;
2264 		node->p_next = head;
2265 		head = node;
2266 	}
2267 	while (head) {
2268 		node = head->p_next;
2269 		qdf_nbuf_track_free(head);
2270 		head = node;
2271 	}
2272 
2273 	/* prefilled buffers should not count as used */
2274 	qdf_net_buf_track_max_used = 0;
2275 }
2276 
2277 /**
2278  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2279  *
2280  * This initializes the memory manager for the nbuf tracking cookies.  Because
2281  * these cookies are all the same size and only used in this feature, we can
2282  * use a kmem_cache to provide tracking as well as to speed up allocations.
2283  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2284  * features) a freelist is prepopulated here.
2285  *
2286  * Return: None
2287  */
2288 static void qdf_nbuf_track_memory_manager_create(void)
2289 {
2290 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2291 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2292 						sizeof(QDF_NBUF_TRACK),
2293 						0, 0, NULL);
2294 
2295 	qdf_nbuf_track_prefill();
2296 }
2297 
2298 /**
2299  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2300  *
2301  * Empty the freelist and print out usage statistics when it is no longer
2302  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2303  * any nbuf tracking cookies were leaked.
2304  *
2305  * Return: None
2306  */
2307 static void qdf_nbuf_track_memory_manager_destroy(void)
2308 {
2309 	QDF_NBUF_TRACK *node, *tmp;
2310 	unsigned long irq_flag;
2311 
2312 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2313 	node = qdf_net_buf_track_free_list;
2314 
2315 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2316 		qdf_print("%s: unexpectedly large max_used count %d",
2317 			  __func__, qdf_net_buf_track_max_used);
2318 
2319 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2320 		qdf_print("%s: %d unused trackers were allocated",
2321 			  __func__,
2322 			  qdf_net_buf_track_max_allocated -
2323 			  qdf_net_buf_track_max_used);
2324 
2325 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2326 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2327 		qdf_print("%s: check freelist shrinking functionality",
2328 			  __func__);
2329 
2330 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2331 		  "%s: %d residual freelist size",
2332 		  __func__, qdf_net_buf_track_free_list_count);
2333 
2334 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2335 		  "%s: %d max freelist size observed",
2336 		  __func__, qdf_net_buf_track_max_free);
2337 
2338 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2339 		  "%s: %d max buffers used observed",
2340 		  __func__, qdf_net_buf_track_max_used);
2341 
2342 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2343 		  "%s: %d max buffers allocated observed",
2344 		  __func__, qdf_net_buf_track_max_allocated);
2345 
2346 	while (node) {
2347 		tmp = node;
2348 		node = node->p_next;
2349 		kmem_cache_free(nbuf_tracking_cache, tmp);
2350 		qdf_net_buf_track_free_list_count--;
2351 	}
2352 
2353 	if (qdf_net_buf_track_free_list_count != 0)
2354 		qdf_info("%d unfreed tracking memory lost in freelist",
2355 			 qdf_net_buf_track_free_list_count);
2356 
2357 	if (qdf_net_buf_track_used_list_count != 0)
2358 		qdf_info("%d unfreed tracking memory still in use",
2359 			 qdf_net_buf_track_used_list_count);
2360 
2361 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2362 	kmem_cache_destroy(nbuf_tracking_cache);
2363 	qdf_net_buf_track_free_list = NULL;
2364 }
2365 
2366 /**
2367  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2368  *
2369  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2370  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2371  * WLAN driver modules whose allocated SKBs are freed by the network stack
2372  * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
2373  * not reported as a memory leak.
2374  *
2375  * Return: none
2376  */
2377 void qdf_net_buf_debug_init(void)
2378 {
2379 	uint32_t i;
2380 
2381 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2382 
2383 	qdf_nbuf_map_tracking_init();
2384 	qdf_nbuf_track_memory_manager_create();
2385 
2386 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2387 		gp_qdf_net_buf_track_tbl[i] = NULL;
2388 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2389 	}
2390 }
2391 qdf_export_symbol(qdf_net_buf_debug_init);
2392 
2393 /**
2394  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2395  *
2396  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2397  * As part of exiting the functionality, free the leaked memory and
2398  * clean up the tracking buffers.
2399  *
2400  * Return: none
2401  */
2402 void qdf_net_buf_debug_exit(void)
2403 {
2404 	uint32_t i;
2405 	uint32_t count = 0;
2406 	unsigned long irq_flag;
2407 	QDF_NBUF_TRACK *p_node;
2408 	QDF_NBUF_TRACK *p_prev;
2409 
2410 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2411 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2412 		p_node = gp_qdf_net_buf_track_tbl[i];
2413 		while (p_node) {
2414 			p_prev = p_node;
2415 			p_node = p_node->p_next;
2416 			count++;
2417 			qdf_info("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK",
2418 				 p_prev->file_name, p_prev->line_num,
2419 				 p_prev->size, p_prev->net_buf);
2420 			qdf_nbuf_track_free(p_prev);
2421 		}
2422 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2423 	}
2424 
2425 	qdf_nbuf_track_memory_manager_destroy();
2426 	qdf_nbuf_map_tracking_deinit();
2427 
2428 #ifdef CONFIG_HALT_KMEMLEAK
2429 	if (count) {
2430 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2431 		QDF_BUG(0);
2432 	}
2433 #endif
2434 }
2435 qdf_export_symbol(qdf_net_buf_debug_exit);
2436 
2437 /**
2438  * qdf_net_buf_debug_hash() - hash network buffer pointer
2439  *
2440  * Return: hash value
2441  */
2442 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2443 {
2444 	uint32_t i;
2445 
2446 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2447 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2448 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2449 
2450 	return i;
2451 }
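
/*
 * Illustrative trace of the hash (hypothetical pointer): for an skb at
 * 0x...12345678, (0x12345678 >> 4) = 0x01234567 and (0x12345678 >> 14)
 * = 0x48d1; their sum masked with (QDF_NET_BUF_TRACK_MAX_SIZE - 1)
 * yields a bucket index in [0, 1023]. Shifting by 4 first discards low
 * bits that carry little entropy due to allocation alignment.
 */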
2452 
2453 /**
2454  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2455  *
2456  * Return: If skb is found in hash table then return pointer to network buffer
2457  *	else return %NULL
2458  */
2459 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2460 {
2461 	uint32_t i;
2462 	QDF_NBUF_TRACK *p_node;
2463 
2464 	i = qdf_net_buf_debug_hash(net_buf);
2465 	p_node = gp_qdf_net_buf_track_tbl[i];
2466 
2467 	while (p_node) {
2468 		if (p_node->net_buf == net_buf)
2469 			return p_node;
2470 		p_node = p_node->p_next;
2471 	}
2472 
2473 	return NULL;
2474 }
2475 
2476 /**
2477  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2478  *
2479  * Return: none
2480  */
2481 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2482 				uint8_t *file_name, uint32_t line_num)
2483 {
2484 	uint32_t i;
2485 	unsigned long irq_flag;
2486 	QDF_NBUF_TRACK *p_node;
2487 	QDF_NBUF_TRACK *new_node;
2488 
2489 	new_node = qdf_nbuf_track_alloc();
2490 
2491 	i = qdf_net_buf_debug_hash(net_buf);
2492 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2493 
2494 	p_node = qdf_net_buf_debug_look_up(net_buf);
2495 
2496 	if (p_node) {
2497 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2498 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2499 			  net_buf, kbasename(file_name), line_num);
2500 		qdf_nbuf_track_free(new_node);
2501 	} else {
2502 		p_node = new_node;
2503 		if (p_node) {
2504 			p_node->net_buf = net_buf;
2505 			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2506 				      QDF_MEM_FILE_NAME_SIZE);
2507 			p_node->line_num = line_num;
2508 			p_node->size = size;
2509 			qdf_mem_skb_inc(size);
2510 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2511 			gp_qdf_net_buf_track_tbl[i] = p_node;
2512 		} else
2513 			qdf_print(
2514 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2515 				  kbasename(file_name), line_num, size);
2516 	}
2517 
2518 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2519 }
2520 qdf_export_symbol(qdf_net_buf_debug_add_node);
2521 
2522 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, uint8_t *file_name,
2523 				   uint32_t line_num)
2524 {
2525 	uint32_t i;
2526 	unsigned long irq_flag;
2527 	QDF_NBUF_TRACK *p_node;
2528 
2529 	i = qdf_net_buf_debug_hash(net_buf);
2530 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2531 
2532 	p_node = qdf_net_buf_debug_look_up(net_buf);
2533 
2534 	if (p_node) {
2535 		qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2536 			      QDF_MEM_FILE_NAME_SIZE);
2537 		p_node->line_num = line_num;
2538 	}
2539 
2540 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2541 }
2542 
2543 qdf_export_symbol(qdf_net_buf_debug_update_node);
2544 
2545 /**
2546  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2547  *
2548  * Return: none
2549  */
2550 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2551 {
2552 	uint32_t i;
2553 	QDF_NBUF_TRACK *p_head;
2554 	QDF_NBUF_TRACK *p_node = NULL;
2555 	unsigned long irq_flag;
2556 	QDF_NBUF_TRACK *p_prev;
2557 
2558 	i = qdf_net_buf_debug_hash(net_buf);
2559 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2560 
2561 	p_head = gp_qdf_net_buf_track_tbl[i];
2562 
2563 	/* Unallocated SKB */
2564 	if (!p_head)
2565 		goto done;
2566 
2567 	p_node = p_head;
2568 	/* Found at head of the table */
2569 	if (p_head->net_buf == net_buf) {
2570 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2571 		goto done;
2572 	}
2573 
2574 	/* Search in collision list */
2575 	while (p_node) {
2576 		p_prev = p_node;
2577 		p_node = p_node->p_next;
2578 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2579 			p_prev->p_next = p_node->p_next;
2580 			break;
2581 		}
2582 	}
2583 
2584 done:
2585 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2586 
2587 	if (p_node) {
2588 		qdf_mem_skb_dec(p_node->size);
2589 		qdf_nbuf_track_free(p_node);
2590 	} else {
2591 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2592 			  net_buf);
2593 		QDF_BUG(0);
2594 	}
2595 }
2596 qdf_export_symbol(qdf_net_buf_debug_delete_node);
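
/*
 * Illustrative lifecycle (not driver code): every tracked allocation is
 * recorded with its callsite and the matching free removes the record;
 * anything still in the table at qdf_net_buf_debug_exit() is reported
 * as a leak:
 *
 *	qdf_net_buf_debug_add_node(nbuf, size, (uint8_t *)__FILE__,
 *				   __LINE__);
 *	...
 *	qdf_net_buf_debug_delete_node(nbuf);
 */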
2597 
2598 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2599 			uint8_t *file_name, uint32_t line_num)
2600 {
2601 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2602 
2603 	while (ext_list) {
2604 		/*
2605 		 * Take care to add each segment if it is a jumbo packet
2606 		 * chained using frag_list
2607 		 */
2608 		qdf_nbuf_t next;
2609 
2610 		next = qdf_nbuf_queue_next(ext_list);
2611 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2612 		ext_list = next;
2613 	}
2614 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2615 }
2616 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2617 
2618 /**
2619  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2620  * @net_buf: Network buf holding head segment (single)
2621  *
2622  * WLAN driver modules whose allocated SKBs are freed by the network stack
2623  * are supposed to call this API before returning the SKB to the network
2624  * stack so that the SKB is not reported as a memory leak.
2625  *
2626  * Return: none
2627  */
2628 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2629 {
2630 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2631 
2632 	while (ext_list) {
2633 		/*
2634 		 * Take care to free each segment if it is a jumbo packet
2635 		 * chained using frag_list
2636 		 */
2637 		qdf_nbuf_t next;
2638 
2639 		next = qdf_nbuf_queue_next(ext_list);
2640 
2641 		if (qdf_nbuf_is_tso(ext_list) &&
2642 			qdf_nbuf_get_users(ext_list) > 1) {
2643 			ext_list = next;
2644 			continue;
2645 		}
2646 
2647 		qdf_net_buf_debug_delete_node(ext_list);
2648 		ext_list = next;
2649 	}
2650 
2651 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2652 		return;
2653 
2654 	qdf_net_buf_debug_delete_node(net_buf);
2655 }
2656 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2657 
2658 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2659 				int reserve, int align, int prio,
2660 				uint8_t *file, uint32_t line)
2661 {
2662 	qdf_nbuf_t nbuf;
2663 
2664 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, file, line);
2665 
2666 	/* Store SKB in internal QDF tracking table */
2667 	if (qdf_likely(nbuf)) {
2668 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2669 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2670 	}
2671 
2672 	return nbuf;
2673 }
2674 qdf_export_symbol(qdf_nbuf_alloc_debug);
2675 
2676 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2677 {
2678 	if (qdf_unlikely(!nbuf))
2679 		return;
2680 
2681 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2682 		goto free_buf;
2683 
2684 	/* Remove SKB from internal QDF tracking table */
2685 	qdf_nbuf_panic_on_free_if_mapped(nbuf, file, line);
2686 	qdf_net_buf_debug_delete_node(nbuf);
2687 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2688 
2689 free_buf:
2690 	__qdf_nbuf_free(nbuf);
2691 }
2692 qdf_export_symbol(qdf_nbuf_free_debug);
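
/*
 * Under NBUF_MEMORY_DEBUG the qdf_nbuf_alloc()/qdf_nbuf_free() wrappers
 * are expected to route here with the caller's location (macro plumbing
 * lives in the headers). A minimal sketch, with illustrative size,
 * reserve, align and prio arguments:
 *
 *	nbuf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
 *				    (uint8_t *)__FILE__, __LINE__);
 *	...
 *	qdf_nbuf_free_debug(nbuf, (uint8_t *)__FILE__, __LINE__);
 */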
2693 
2694 #endif /* NBUF_MEMORY_DEBUG */
2695 
2696 #if defined(FEATURE_TSO)
2697 
2698 /**
2699  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2700  *
2701  * @ethproto: ethernet type of the msdu
2702  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2703  * @l2_len: L2 length for the msdu
2704  * @eit_hdr: pointer to EIT header
2705  * @eit_hdr_len: EIT header length for the msdu
2706  * @eit_hdr_dma_map_addr: dma addr for EIT header
2707  * @tcphdr: pointer to tcp header
2708  * @ipv4_csum_en: ipv4 checksum enable
2709  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2710  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2711  * @ip_id: IP id
2712  * @tcp_seq_num: TCP sequence number
2713  *
2714  * This structure holds the TSO common info that is common
2715  * across all the TCP segments of the jumbo packet.
2716  */
2717 struct qdf_tso_cmn_seg_info_t {
2718 	uint16_t ethproto;
2719 	uint16_t ip_tcp_hdr_len;
2720 	uint16_t l2_len;
2721 	uint8_t *eit_hdr;
2722 	uint32_t eit_hdr_len;
2723 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2724 	struct tcphdr *tcphdr;
2725 	uint16_t ipv4_csum_en;
2726 	uint16_t tcp_ipv4_csum_en;
2727 	uint16_t tcp_ipv6_csum_en;
2728 	uint16_t ip_id;
2729 	uint32_t tcp_seq_num;
2730 };
2731 
2732 /**
2733  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2734  * information
2735  * @osdev: qdf device handle
2736  * @skb: skb buffer
2737  * @tso_info: Parameters common to all segments
2738  *
2739  * Get the TSO information that is common across all the TCP
2740  * segments of the jumbo packet.
2741  *
2742  * Return: 0 - success, 1 - failure
2743  */
2744 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2745 			struct sk_buff *skb,
2746 			struct qdf_tso_cmn_seg_info_t *tso_info)
2747 {
2748 	/* Get ethernet type and ethernet header length */
2749 	tso_info->ethproto = vlan_get_protocol(skb);
2750 
2751 	/* Determine whether this is an IPv4 or IPv6 packet */
2752 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2753 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2754 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2755 
2756 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2757 		tso_info->ipv4_csum_en = 1;
2758 		tso_info->tcp_ipv4_csum_en = 1;
2759 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2760 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2761 				ipv4_hdr->protocol);
2762 			return 1;
2763 		}
2764 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2765 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2766 		tso_info->tcp_ipv6_csum_en = 1;
2767 	} else {
2768 		qdf_err("TSO: ethertype 0x%x is not supported!",
2769 			tso_info->ethproto);
2770 		return 1;
2771 	}
2772 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2773 	tso_info->tcphdr = tcp_hdr(skb);
2774 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2775 	/* get pointer to the ethernet + IP + TCP header and their length */
2776 	tso_info->eit_hdr = skb->data;
2777 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2778 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2779 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2780 							tso_info->eit_hdr,
2781 							tso_info->eit_hdr_len,
2782 							DMA_TO_DEVICE);
2783 	if (unlikely(dma_mapping_error(osdev->dev,
2784 				       tso_info->eit_hdr_dma_map_addr))) {
2785 		qdf_err("DMA mapping error!");
2786 		qdf_assert(0);
2787 		return 1;
2788 	}
2789 
2790 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2791 		/* include IPv4 header length for IPV4 (total length) */
2792 		tso_info->ip_tcp_hdr_len =
2793 			tso_info->eit_hdr_len - tso_info->l2_len;
2794 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2795 		/* exclude IPv6 header length for IPv6 (payload length) */
2796 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2797 	}
2798 	/*
2799 	 * The length of the payload (application layer data) is added to
2800 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2801 	 * descriptor.
2802 	 */
2803 
2804 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2805 		tso_info->tcp_seq_num,
2806 		tso_info->eit_hdr_len,
2807 		tso_info->l2_len,
2808 		skb->len);
2809 	return 0;
2810 }
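
/*
 * Worked example (typical TCP/IPv4 frame, no VLAN, no TCP options):
 * l2_len = 14 (ethernet header), IP header = 20, TCP header = 20, so
 * eit_hdr_len = 54 and ip_tcp_hdr_len = 54 - 14 = 40. For IPv6 only
 * tcp_hdrlen() (20 here) is used, since the IPv6 payload length field
 * excludes the IPv6 header itself.
 */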
2811 
2812 
2813 /**
2814  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2815  *
2816  * @curr_seg: Segment whose contents are initialized
2817  * @tso_cmn_info: Parameters common to all segments
2818  *
2819  * Return: None
2820  */
2821 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2822 				struct qdf_tso_seg_elem_t *curr_seg,
2823 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2824 {
2825 	/* Initialize the flags to 0 */
2826 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2827 
2828 	/*
2829 	 * The following fields remain the same across all segments of
2830 	 * a jumbo packet
2831 	 */
2832 	curr_seg->seg.tso_flags.tso_enable = 1;
2833 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2834 		tso_cmn_info->ipv4_csum_en;
2835 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2836 		tso_cmn_info->tcp_ipv6_csum_en;
2837 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2838 		tso_cmn_info->tcp_ipv4_csum_en;
2839 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2840 
2841 	/* The following fields change for the segments */
2842 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2843 	tso_cmn_info->ip_id++;
2844 
2845 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2846 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2847 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2848 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2849 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2850 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2851 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2852 
2853 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2854 
2855 	/*
2856 	 * First fragment for each segment always contains the ethernet,
2857 	 * IP and TCP header
2858 	 */
2859 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2860 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2861 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2862 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2863 
2864 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2865 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2866 		   tso_cmn_info->eit_hdr_len,
2867 		   curr_seg->seg.tso_flags.tcp_seq_num,
2868 		   curr_seg->seg.total_len);
2869 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2870 }
2871 
2872 /**
2873  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2874  * into segments
2875  * @osdev: qdf device handle
2876  * @skb: network buffer to be segmented
2877  * @tso_info: output; information about the TSO segments is populated here
2878  *
2879  * This function fragments a TCP jumbo packet into smaller
2880  * segments to be transmitted by the driver. It chains the TSO
2881  * segments created into a list.
2882  *
2883  * Return: number of TSO segments
2884  */
2885 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2886 		struct qdf_tso_info_t *tso_info)
2887 {
2888 	/* common across all segments */
2889 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2890 	/* segment specific */
2891 	void *tso_frag_vaddr;
2892 	qdf_dma_addr_t tso_frag_paddr = 0;
2893 	uint32_t num_seg = 0;
2894 	struct qdf_tso_seg_elem_t *curr_seg;
2895 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2896 	struct skb_frag_struct *frag = NULL;
2897 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2898 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2899 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2900 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2901 	int j = 0; /* skb fragment index */
2902 
2903 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2904 
2905 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2906 						skb, &tso_cmn_info))) {
2907 		qdf_warn("TSO: error getting common segment info");
2908 		return 0;
2909 	}
2910 
2911 	total_num_seg = tso_info->tso_num_seg_list;
2912 	curr_seg = tso_info->tso_seg_list;
2913 
2914 	/* length of the first chunk of data in the skb */
2915 	skb_frag_len = skb_headlen(skb);
2916 
2917 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2918 	/* update the remaining skb fragment length and TSO segment length */
2919 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2920 	skb_proc -= tso_cmn_info.eit_hdr_len;
2921 
2922 	/* get the address to the next tso fragment */
2923 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2924 	/* get the length of the next tso fragment */
2925 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2926 
2927 	if (tso_frag_len != 0) {
2928 		tso_frag_paddr = dma_map_single(osdev->dev,
2929 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2930 	}
2931 
2932 	if (unlikely(dma_mapping_error(osdev->dev,
2933 					tso_frag_paddr))) {
2934 		qdf_err("DMA mapping error!");
2935 		qdf_assert(0);
2936 		return 0;
2937 	}
2938 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2939 		__LINE__, skb_frag_len, tso_frag_len);
2940 	num_seg = tso_info->num_segs;
2941 	tso_info->num_segs = 0;
2942 	tso_info->is_tso = 1;
2943 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2944 
2945 	while (num_seg && curr_seg) {
2946 		int i = 1; /* tso fragment index */
2947 		uint8_t more_tso_frags = 1;
2948 
2949 		curr_seg->seg.num_frags = 0;
2950 		tso_info->num_segs++;
2951 		total_num_seg->num_seg.tso_cmn_num_seg++;
2952 
2953 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2954 						 &tso_cmn_info);
2955 
2956 		if (unlikely(skb_proc == 0))
2957 			return tso_info->num_segs;
2958 
2959 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2960 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2961 		/* frag len is added to ip_len in while loop below*/
2962 
2963 		curr_seg->seg.num_frags++;
2964 
2965 		while (more_tso_frags) {
2966 			if (tso_frag_len != 0) {
2967 				curr_seg->seg.tso_frags[i].vaddr =
2968 					tso_frag_vaddr;
2969 				curr_seg->seg.tso_frags[i].length =
2970 					tso_frag_len;
2971 				curr_seg->seg.total_len += tso_frag_len;
2972 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2973 				curr_seg->seg.num_frags++;
2974 				skb_proc = skb_proc - tso_frag_len;
2975 
2976 				/* increment the TCP sequence number */
2977 
2978 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2979 				curr_seg->seg.tso_frags[i].paddr =
2980 					tso_frag_paddr;
2981 			}
2982 
2983 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2984 					__func__, __LINE__,
2985 					i,
2986 					tso_frag_len,
2987 					curr_seg->seg.total_len,
2988 					curr_seg->seg.tso_frags[i].vaddr);
2989 
2990 			/* if there is no more data left in the skb */
2991 			if (!skb_proc)
2992 				return tso_info->num_segs;
2993 
2994 			/* get the next payload fragment information */
2995 			/* check if there are more fragments in this segment */
2996 			if (tso_frag_len < tso_seg_size) {
2997 				more_tso_frags = 1;
2998 				if (tso_frag_len != 0) {
2999 					tso_seg_size = tso_seg_size -
3000 						tso_frag_len;
3001 					i++;
3002 					if (curr_seg->seg.num_frags ==
3003 								FRAG_NUM_MAX) {
3004 						more_tso_frags = 0;
3005 						/*
3006 						 * reset i and the tso
3007 						 * payload size
3008 						 */
3009 						i = 1;
3010 						tso_seg_size =
3011 							skb_shinfo(skb)->
3012 								gso_size;
3013 					}
3014 				}
3015 			} else {
3016 				more_tso_frags = 0;
3017 				/* reset i and the tso payload size */
3018 				i = 1;
3019 				tso_seg_size = skb_shinfo(skb)->gso_size;
3020 			}
3021 
3022 			/* if the next fragment is contiguous */
3023 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3024 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3025 				skb_frag_len = skb_frag_len - tso_frag_len;
3026 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3027 
3028 			} else { /* the next fragment is not contiguous */
3029 				if (skb_shinfo(skb)->nr_frags == 0) {
3030 					qdf_info("TSO: nr_frags == 0!");
3031 					qdf_assert(0);
3032 					return 0;
3033 				}
3034 				if (j >= skb_shinfo(skb)->nr_frags) {
3035 					qdf_info("TSO: nr_frags %d j %d",
3036 						 skb_shinfo(skb)->nr_frags, j);
3037 					qdf_assert(0);
3038 					return 0;
3039 				}
3040 				frag = &skb_shinfo(skb)->frags[j];
3041 				skb_frag_len = skb_frag_size(frag);
3042 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3043 				tso_frag_vaddr = skb_frag_address_safe(frag);
3044 				j++;
3045 			}
3046 
3047 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3048 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3049 				tso_seg_size);
3050 
3051 			if (!(tso_frag_vaddr)) {
3052 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3053 						__func__);
3054 				return 0;
3055 			}
3056 
3057 			tso_frag_paddr =
3058 					 dma_map_single(osdev->dev,
3059 						 tso_frag_vaddr,
3060 						 tso_frag_len,
3061 						 DMA_TO_DEVICE);
3062 			if (unlikely(dma_mapping_error(osdev->dev,
3063 							tso_frag_paddr))) {
3064 				qdf_err("DMA mapping error!");
3065 				qdf_assert(0);
3066 				return 0;
3067 			}
3068 		}
3069 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3070 				curr_seg->seg.tso_flags.tcp_seq_num);
3071 		num_seg--;
3072 		/* if TCP FIN flag was set, set it in the last segment */
3073 		if (!num_seg)
3074 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3075 
3076 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3077 		curr_seg = curr_seg->next;
3078 	}
3079 	return tso_info->num_segs;
3080 }
3081 qdf_export_symbol(__qdf_nbuf_get_tso_info);
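
/*
 * Worked example (illustrative): a 4434-byte linear TCP/IPv4 skb with
 * gso_size = 1460 and eit_hdr_len = 54 has 4380 payload bytes and is
 * split into 3 TSO segments of 1460 bytes each; every segment's
 * tso_frags[0] points at the shared 54-byte EIT header and
 * tso_frags[1] at that segment's slice of the payload.
 */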
3082 
3083 /**
3084  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3085  *
3086  * @osdev: qdf device handle
3087  * @tso_seg: TSO segment element to be unmapped
3088  * @is_last_seg: whether this is last tso seg or not
3089  *
3090  * Return: none
3091  */
3092 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3093 			  struct qdf_tso_seg_elem_t *tso_seg,
3094 			  bool is_last_seg)
3095 {
3096 	uint32_t num_frags = 0;
3097 
3098 	if (tso_seg->seg.num_frags > 0)
3099 		num_frags = tso_seg->seg.num_frags - 1;
3100 
3101 	/* Num of frags in a tso seg cannot be less than 2 */
3102 	if (num_frags < 1) {
3103 		qdf_assert(0);
3104 		qdf_err("ERROR: num of frags in a tso segment is %d",
3105 			(num_frags + 1));
3106 		return;
3107 	}
3108 
3109 	while (num_frags) {
3110 		/* Do dma unmap the tso seg except the 0th frag */
3111 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3112 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3113 				num_frags);
3114 			qdf_assert(0);
3115 			return;
3116 		}
3117 		dma_unmap_single(osdev->dev,
3118 				 tso_seg->seg.tso_frags[num_frags].paddr,
3119 				 tso_seg->seg.tso_frags[num_frags].length,
3120 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3121 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3122 		num_frags--;
3123 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3124 	}
3125 
3126 	if (is_last_seg) {
3127 		/* Do dma unmap for the tso seg 0th frag */
3128 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3129 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3130 			qdf_assert(0);
3131 			return;
3132 		}
3133 		dma_unmap_single(osdev->dev,
3134 				 tso_seg->seg.tso_frags[0].paddr,
3135 				 tso_seg->seg.tso_frags[0].length,
3136 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3137 		tso_seg->seg.tso_frags[0].paddr = 0;
3138 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3139 	}
3140 }
3141 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
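
/*
 * Illustrative pairing (TX-completion logic assumed): payload frags of
 * each segment are unmapped as it completes, while tso_frags[0] (the
 * shared EIT header mapping) is unmapped exactly once by passing
 * is_last_seg = true for the final segment:
 *
 *	__qdf_nbuf_unmap_tso_segment(osdev, seg, seg->next == NULL);
 */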
3142 
3143 /**
3144  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
3145  * a TSO network buffer will be divided into
3146  * @skb: network buffer to be segmented
3147  *
3148  * This function computes how many smaller segments a TCP jumbo
3149  * packet will be fragmented into by the driver, without performing
3150  * the actual segmentation. The count matches the segment list that
3151  * __qdf_nbuf_get_tso_info() builds for the same skb, so callers can
3152  * size the TSO descriptor allocation up front.
3153  *
3154  * Return: number of TSO segments, or 0 on failure
3155  */
3156 #ifndef BUILD_X86
3157 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3158 {
3159 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3160 	uint32_t remainder, num_segs = 0;
3161 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3162 	uint8_t frags_per_tso = 0;
3163 	uint32_t skb_frag_len = 0;
3164 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3165 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3166 	struct skb_frag_struct *frag = NULL;
3167 	int j = 0;
3168 	uint32_t temp_num_seg = 0;
3169 
3170 	/* length of the first chunk of data in the skb minus eit header*/
3171 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3172 
3173 	/* Calculate num of segs for skb's first chunk of data*/
3174 	remainder = skb_frag_len % tso_seg_size;
3175 	num_segs = skb_frag_len / tso_seg_size;
3176 	/**
3177 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3178 	 * In that case, one more tso seg is required to accommodate
3179 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3180 	 * then remaining data will be accommodated while doing the calculation
3181 	 * for nr_frags data. Hence, frags_per_tso++.
3182 	 */
3183 	if (remainder) {
3184 		if (!skb_nr_frags)
3185 			num_segs++;
3186 		else
3187 			frags_per_tso++;
3188 	}
3189 
3190 	while (skb_nr_frags) {
3191 		if (j >= skb_shinfo(skb)->nr_frags) {
3192 			qdf_info("TSO: nr_frags %d j %d",
3193 				 skb_shinfo(skb)->nr_frags, j);
3194 			qdf_assert(0);
3195 			return 0;
3196 		}
3197 		/**
3198 		 * Calculate the number of tso seg for nr_frags data:
3199 		 * Get the length of each frag in skb_frag_len, add to
3200 		 * remainder. Get the number of segments by dividing it by
3201 		 * tso_seg_size and calculate the new remainder.
3202 		 * Decrement the nr_frags value and keep
3203 		 * looping all the skb_fragments.
3204 		 */
3205 		frag = &skb_shinfo(skb)->frags[j];
3206 		skb_frag_len = skb_frag_size(frag);
3207 		temp_num_seg = num_segs;
3208 		remainder += skb_frag_len;
3209 		num_segs += remainder / tso_seg_size;
3210 		remainder = remainder % tso_seg_size;
3211 		skb_nr_frags--;
3212 		if (remainder) {
3213 			if (num_segs > temp_num_seg)
3214 				frags_per_tso = 0;
3215 			/**
3216 			 * Increment frags_per_tso whenever the remainder is
3217 			 * positive. If frags_per_tso reaches (max-1)
3218 			 * [the first frag always holds the EIT header, hence
3219 			 * max-1], increment num_segs as no more data can be
3220 			 * accommodated in the current tso seg. Reset the
3221 			 * remainder and frags_per_tso and keep looping.
3222 			 */
3223 			frags_per_tso++;
3224 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3225 				num_segs++;
3226 				frags_per_tso = 0;
3227 				remainder = 0;
3228 			}
3229 			/**
3230 			 * If this is the last skb frag and the remainder is
3231 			 * still non-zero (frags_per_tso has not reached
3232 			 * max-1), then increment num_segs to take care of
3233 			 * the remaining length.
3234 			 */
3235 			if (!skb_nr_frags && remainder) {
3236 				num_segs++;
3237 				frags_per_tso = 0;
3238 			}
3239 		} else {
3240 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3241 			frags_per_tso = 0;
3242 		}
3243 		j++;
3244 	}
3245 
3246 	return num_segs;
3247 }
3248 #else
3249 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3250 {
3251 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3252 	struct skb_frag_struct *frag = NULL;
3253 
3254 	/*
3255 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3256 	 * region which cannot be accessed by Target
3257 	 */
3258 	if (virt_to_phys(skb->data) < 0x50000040) {
3259 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3260 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3261 				virt_to_phys(skb->data));
3262 		goto fail;
3263 
3264 	}
3265 
3266 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3267 		frag = &skb_shinfo(skb)->frags[i];
3268 
3269 		if (!frag)
3270 			goto fail;
3271 
3272 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3273 			goto fail;
3274 	}
3275 
3276 
3277 	gso_size = skb_shinfo(skb)->gso_size;
3278 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3279 			+ tcp_hdrlen(skb));
3280 	while (tmp_len) {
3281 		num_segs++;
3282 		if (tmp_len > gso_size)
3283 			tmp_len -= gso_size;
3284 		else
3285 			break;
3286 	}
3287 
3288 	return num_segs;
3289 
3290 	/*
3291 	 * Do not free this frame, just do socket level accounting
3292 	 * so that this is not reused.
3293 	 */
3294 fail:
3295 	if (skb->sk)
3296 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3297 
3298 	return 0;
3299 }
3300 #endif
3301 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
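
/*
 * Worked example for the non-BUILD_X86 path (illustrative numbers):
 * skb_headlen = 2974, eit_hdr_len = 54 and tso_seg_size = 1460 with no
 * page frags gives skb_frag_len = 2920 and num_segs = 2920 / 1460 = 2
 * with remainder 0. If skb_frag_len were 3000 instead, the remainder
 * of 80 with nr_frags == 0 bumps num_segs to 3 to cover the tail.
 */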
3302 
3303 #endif /* FEATURE_TSO */
3304 
3305 /**
3306  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
3307  *
3308  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3309  *
3310  * Return: N/A
3311  */
3312 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3313 			  uint32_t *lo, uint32_t *hi)
3314 {
3315 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3316 		*lo = lower_32_bits(dmaaddr);
3317 		*hi = upper_32_bits(dmaaddr);
3318 	} else {
3319 		*lo = dmaaddr;
3320 		*hi = 0;
3321 	}
3322 }
3323 
3324 qdf_export_symbol(__qdf_dmaaddr_to_32s);
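
/*
 * Illustrative split: for a 64-bit DMA address 0x0000000123456789 the
 * caller gets *lo = 0x23456789 and *hi = 0x00000001; on a build where
 * qdf_dma_addr_t is 32 bits wide the whole address lands in *lo and
 * *hi is 0.
 */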
3325 
3326 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3327 {
3328 	qdf_nbuf_users_inc(&skb->users);
3329 	return skb;
3330 }
3331 qdf_export_symbol(__qdf_nbuf_inc_users);
3332 
3333 int __qdf_nbuf_get_users(struct sk_buff *skb)
3334 {
3335 	return qdf_nbuf_users_read(&skb->users);
3336 }
3337 qdf_export_symbol(__qdf_nbuf_get_users);
3338 
3339 /**
3340  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3341  * @skb: sk_buff handle
3342  *
3343  * Return: none
3344  */
3346 void __qdf_nbuf_ref(struct sk_buff *skb)
3347 {
3348 	skb_get(skb);
3349 }
3350 qdf_export_symbol(__qdf_nbuf_ref);
3351 
3352 /**
3353  * __qdf_nbuf_shared() - Check whether the buffer is shared
3354  *  @skb: sk_buff buffer
3355  *
3356  *  Return: true if more than one person has a reference to this buffer.
3357  */
3358 int __qdf_nbuf_shared(struct sk_buff *skb)
3359 {
3360 	return skb_shared(skb);
3361 }
3362 qdf_export_symbol(__qdf_nbuf_shared);
3363 
3364 /**
3365  * __qdf_nbuf_dmamap_create() - create a DMA map.
3366  * @osdev: qdf device handle
3367  * @dmap: dma map handle
3368  *
3369  * This can later be used to map networking buffers. They:
3370  * - need space in adf_drv's software descriptor
3371  * - are typically created during adf_drv_create
3372  * - need to be created before any API(qdf_nbuf_map) that uses them
3373  *
3374  * Return: QDF STATUS
3375  */
3376 QDF_STATUS
3377 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3378 {
3379 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3380 	/*
3381 	 * The driver can tell us its SG capability; it must be handled.
3382 	 * Use bounce buffers if they are there.
3383 	 */
3384 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3385 	if (!(*dmap))
3386 		error = QDF_STATUS_E_NOMEM;
3387 
3388 	return error;
3389 }
3390 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3391 /**
3392  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3393  * @osdev: qdf device handle
3394  * @dmap: dma map handle
3395  *
3396  * Return: none
3397  */
3398 void
3399 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3400 {
3401 	kfree(dmap);
3402 }
3403 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
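
/*
 * Illustrative create/use/destroy pairing (error handling elided,
 * surrounding driver context assumed):
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return;
 *	... map networking buffers using dmap ...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */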
3404 
3405 /**
3406  * __qdf_nbuf_map_nbytes_single() - map nbytes
3407  * @osdev: os device
3408  * @buf: buffer
3409  * @dir: direction
3410  * @nbytes: number of bytes
3411  *
3412  * Return: QDF_STATUS
3413  */
3414 #ifdef A_SIMOS_DEVHOST
3415 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3416 		qdf_device_t osdev, struct sk_buff *buf,
3417 		 qdf_dma_dir_t dir, int nbytes)
3418 {
3419 	qdf_dma_addr_t paddr;
3420 
3421 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3422 	return QDF_STATUS_SUCCESS;
3423 }
3424 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3425 #else
3426 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3427 		qdf_device_t osdev, struct sk_buff *buf,
3428 		 qdf_dma_dir_t dir, int nbytes)
3429 {
3430 	qdf_dma_addr_t paddr;
3431 
3432 	/* assume that the OS only provides a single fragment */
3433 	QDF_NBUF_CB_PADDR(buf) = paddr =
3434 		dma_map_single(osdev->dev, buf->data,
3435 			nbytes, __qdf_dma_dir_to_os(dir));
3436 	return dma_mapping_error(osdev->dev, paddr) ?
3437 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3438 }
3439 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3440 #endif
3441 /**
3442  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3443  * @osdev: os device
3444  * @buf: buffer
3445  * @dir: direction
3446  * @nbytes: number of bytes
3447  *
3448  * Return: none
3449  */
3450 #if defined(A_SIMOS_DEVHOST)
3451 void
3452 __qdf_nbuf_unmap_nbytes_single(
3453 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3454 {
3455 }
3456 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3457 
3458 #else
3459 void
3460 __qdf_nbuf_unmap_nbytes_single(
3461 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3462 {
3463 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3464 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3465 		return;
3466 	}
3467 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3468 			nbytes, __qdf_dma_dir_to_os(dir));
3469 }
3470 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3471 #endif
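
/*
 * Illustrative map/unmap pairing for a single-fragment nbuf (the same
 * length must be passed on unmap; error handling elided):
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return;
 *	... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */
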
3472 /**
3473  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3474  * @osdev: os device
3475  * @skb: skb handle
3476  * @dir: dma direction
3477  * @nbytes: number of bytes to be mapped
3478  *
3479  * Return: QDF_STATUS
3480  */
3481 #ifdef QDF_OS_DEBUG
3482 QDF_STATUS
3483 __qdf_nbuf_map_nbytes(
3484 	qdf_device_t osdev,
3485 	struct sk_buff *skb,
3486 	qdf_dma_dir_t dir,
3487 	int nbytes)
3488 {
3489 	struct skb_shared_info  *sh = skb_shinfo(skb);
3490 
3491 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3492 
3493 	/*
3494 	 * Assume there's only a single fragment.
3495 	 * To support multiple fragments, it would be necessary to change
3496 	 * adf_nbuf_t to be a separate object that stores meta-info
3497 	 * (including the bus address for each fragment) and a pointer
3498 	 * to the underlying sk_buff.
3499 	 */
3500 	qdf_assert(sh->nr_frags == 0);
3501 
3502 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3503 }
3504 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3505 #else
3506 QDF_STATUS
3507 __qdf_nbuf_map_nbytes(
3508 	qdf_device_t osdev,
3509 	struct sk_buff *skb,
3510 	qdf_dma_dir_t dir,
3511 	int nbytes)
3512 {
3513 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3514 }
3515 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3516 #endif
3517 /**
3518  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3519  * @osdev: OS device
3520  * @skb: skb handle
3521  * @dir: direction
3522  * @nbytes: number of bytes
3523  *
3524  * Return: none
3525  */
3526 void
3527 __qdf_nbuf_unmap_nbytes(
3528 	qdf_device_t osdev,
3529 	struct sk_buff *skb,
3530 	qdf_dma_dir_t dir,
3531 	int nbytes)
3532 {
3533 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3534 
3535 	/*
3536 	 * Assume there's a single fragment.
3537 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3538 	 */
3539 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3540 }
3541 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3542 
3543 /**
3544  * __qdf_nbuf_dma_map_info() - return the dma map info
3545  * @bmap: dma map
3546  * @sg: dma map info
3547  *
3548  * Return: none
3549  */
3550 void
3551 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3552 {
3553 	qdf_assert(bmap->mapped);
3554 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3555 
3556 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3557 			sizeof(struct __qdf_segment));
3558 	sg->nsegs = bmap->nsegs;
3559 }
3560 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3561 /**
3562  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3563  *			specified by the index
3564  * @skb: sk buff
3565  * @sg: scatter/gather list of all the frags
3566  *
3567  * Return: none
3568  */
3569 #if defined(__QDF_SUPPORT_FRAG_MEM)
3570 void
3571 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3572 {
3573 	struct skb_shared_info *sh = skb_shinfo(skb);
3574 	int i;
3575 
3576 	qdf_assert(skb != NULL);
3577 	sg->sg_segs[0].vaddr = skb->data;
3578 	sg->sg_segs[0].len   = skb->len;
3579 	sg->nsegs            = 1;
3580 
3581 	for (i = 1; i <= sh->nr_frags; i++) {
3582 		skb_frag_t *f = &sh->frags[i - 1];
3583 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
3584 			f->page_offset);
3585 		sg->sg_segs[i].len = f->size;
3586 		qdf_assert(i < QDF_MAX_SGLIST);
3587 	}
3588 	sg->nsegs += sh->nr_frags; /* head segment + page fragments */
3589 }
3590 qdf_export_symbol(__qdf_nbuf_frag_info);
3591 #else
3592 #ifdef QDF_OS_DEBUG
3593 void
3594 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3595 {
3596 
3597 	struct skb_shared_info  *sh = skb_shinfo(skb);
3598 
3599 	qdf_assert(skb != NULL);
3600 	sg->sg_segs[0].vaddr = skb->data;
3601 	sg->sg_segs[0].len   = skb->len;
3602 	sg->nsegs            = 1;
3603 
3604 	qdf_assert(sh->nr_frags == 0);
3605 }
3606 qdf_export_symbol(__qdf_nbuf_frag_info);
3607 #else
3608 void
3609 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3610 {
3611 	sg->sg_segs[0].vaddr = skb->data;
3612 	sg->sg_segs[0].len   = skb->len;
3613 	sg->nsegs            = 1;
3614 }
3615 qdf_export_symbol(__qdf_nbuf_frag_info);
3616 #endif
3617 #endif
3618 /**
3619  * __qdf_nbuf_get_frag_size() - get frag size
3620  * @nbuf: sk buffer
3621  * @cur_frag: current frag
3622  *
3623  * Return: frag size
3624  */
3625 uint32_t
3626 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3627 {
3628 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3629 	const skb_frag_t *frag = sh->frags + cur_frag;
3630 
3631 	return skb_frag_size(frag);
3632 }
3633 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3634 
3635 /**
3636  * __qdf_nbuf_frag_map() - dma map frag
3637  * @osdev: os device
3638  * @nbuf: sk buff
3639  * @offset: offset
3640  * @dir: direction
3641  * @cur_frag: current fragment
3642  *
3643  * Return: QDF status
3644  */
3645 #ifdef A_SIMOS_DEVHOST
3646 QDF_STATUS __qdf_nbuf_frag_map(
3647 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3648 	int offset, qdf_dma_dir_t dir, int cur_frag)
3649 {
3650 	int32_t paddr, frag_len;
3651 
3652 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3653 	return QDF_STATUS_SUCCESS;
3654 }
3655 qdf_export_symbol(__qdf_nbuf_frag_map);
3656 #else
3657 QDF_STATUS __qdf_nbuf_frag_map(
3658 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3659 	int offset, qdf_dma_dir_t dir, int cur_frag)
3660 {
3661 	dma_addr_t paddr, frag_len;
3662 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3663 	const skb_frag_t *frag = sh->frags + cur_frag;
3664 
3665 	frag_len = skb_frag_size(frag);
3666 
3667 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3668 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3669 					__qdf_dma_dir_to_os(dir));
3670 	return dma_mapping_error(osdev->dev, paddr) ?
3671 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3672 }
3673 qdf_export_symbol(__qdf_nbuf_frag_map);
3674 #endif
3675 /**
3676  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3677  * @dmap: dma map
3678  * @cb: callback
3679  * @arg: argument
3680  *
3681  * Return: none
3682  */
3683 void
3684 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3685 {
3686 	return;
3687 }
3688 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3689 
3690 
3691 /**
3692  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3693  * @osdev: os device
3694  * @buf: sk buff
3695  * @dir: direction
3696  *
3697  * Return: none
3698  */
3699 #if defined(A_SIMOS_DEVHOST)
3700 static void __qdf_nbuf_sync_single_for_cpu(
3701 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3702 {
3703 	return;
3704 }
3705 #else
3706 static void __qdf_nbuf_sync_single_for_cpu(
3707 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3708 {
3709 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3710 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3711 		return;
3712 	}
3713 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3714 		skb_end_offset(buf) - skb_headroom(buf),
3715 		__qdf_dma_dir_to_os(dir));
3716 }
3717 #endif
3718 /**
3719  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3720  * @osdev: os device
3721  * @skb: sk buff
3722  * @dir: direction
3723  *
3724  * Return: none
3725  */
3726 void
3727 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3728 	struct sk_buff *skb, qdf_dma_dir_t dir)
3729 {
3730 	qdf_assert(
3731 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3732 
3733 	/*
3734 	 * Assume there's a single fragment.
3735 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3736 	 */
3737 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3738 }
3739 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3740 
3741 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3742 /**
3743  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3744  * @rx_status: Pointer to rx_status.
3745  * @rtap_buf: Buf to which VHT info has to be updated.
3746  * @rtap_len: Current length of radiotap buffer
3747  *
3748  * Return: Length of radiotap after VHT flags updated.
3749  */
3750 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3751 					struct mon_rx_status *rx_status,
3752 					int8_t *rtap_buf,
3753 					uint32_t rtap_len)
3754 {
3755 	uint16_t vht_flags = 0;
3756 
3757 	rtap_len = qdf_align(rtap_len, 2);
3758 
3759 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3760 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3761 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3762 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3763 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3764 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3765 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3766 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3767 	rtap_len += 2;
3768 
3769 	rtap_buf[rtap_len] |=
3770 		(rx_status->is_stbc ?
3771 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3772 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3773 		(rx_status->ldpc ?
3774 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3775 		(rx_status->beamformed ?
3776 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3777 	rtap_len += 1;
3778 	switch (rx_status->vht_flag_values2) {
3779 	case IEEE80211_RADIOTAP_VHT_BW_20:
3780 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3781 		break;
3782 	case IEEE80211_RADIOTAP_VHT_BW_40:
3783 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3784 		break;
3785 	case IEEE80211_RADIOTAP_VHT_BW_80:
3786 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3787 		break;
3788 	case IEEE80211_RADIOTAP_VHT_BW_160:
3789 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3790 		break;
3791 	}
3792 	rtap_len += 1;
3793 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3794 	rtap_len += 1;
3795 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3796 	rtap_len += 1;
3797 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3798 	rtap_len += 1;
3799 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3800 	rtap_len += 1;
3801 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3802 	rtap_len += 1;
3803 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3804 	rtap_len += 1;
3805 	put_unaligned_le16(rx_status->vht_flag_values6,
3806 			   &rtap_buf[rtap_len]);
3807 	rtap_len += 2;
3808 
3809 	return rtap_len;
3810 }
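
/*
 * Worked size example (derived from the code above): the VHT element is
 * 2 (known, LE u16) + 1 (flags) + 1 (bandwidth) + 4 (mcs_nss[0..3]) +
 * 1 (coding) + 1 (group_id) + 2 (partial_aid, LE u16) = 12 bytes, plus
 * at most 1 alignment byte, hence RADIOTAP_VHT_FLAGS_LEN is (12 + 1).
 */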
3811 
3812 /**
3813  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3814  * @rx_status: Pointer to rx_status.
3815  * @rtap_buf: buffer to which radiotap has to be updated
3816  * @rtap_len: radiotap length
3817  *
 * This API updates the high-efficiency (11ax) fields in the radiotap header.
 *
 * Return: updated length of the radiotap buffer.
3821  */
3822 static unsigned int
3823 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3824 				     int8_t *rtap_buf, uint32_t rtap_len)
3825 {
3826 	/*
3827 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3828 	 * Enable all "known" HE radiotap flags for now
3829 	 */
3830 	rtap_len = qdf_align(rtap_len, 2);
3831 
3832 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3833 	rtap_len += 2;
3834 
3835 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3836 	rtap_len += 2;
3837 
3838 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3839 	rtap_len += 2;
3840 
3841 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3842 	rtap_len += 2;
3843 
3844 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3845 	rtap_len += 2;
3846 
3847 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3848 	rtap_len += 2;
3849 	qdf_debug("he data %x %x %x %x %x %x",
3850 		  rx_status->he_data1,
3851 		  rx_status->he_data2, rx_status->he_data3,
3852 		  rx_status->he_data4, rx_status->he_data5,
3853 		  rx_status->he_data6);
3854 	return rtap_len;
3855 }
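
/*
 * Size note (derived from the code above): the six he_data words are
 * written back to back as little-endian u16s, i.e. 12 bytes plus at
 * most 1 alignment byte, which is why RADIOTAP_HE_FLAGS_LEN is (12 + 1).
 */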
3856 
3857 
3858 /**
3859  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3860  * @rx_status: Pointer to rx_status.
3861  * @rtap_buf: buffer to which radiotap has to be updated
3862  * @rtap_len: radiotap length
3863  *
 * This API updates the HE-MU fields in the radiotap header.
 *
 * Return: updated length of the radiotap buffer.
3867  */
3868 static unsigned int
3869 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3870 				     int8_t *rtap_buf, uint32_t rtap_len)
3871 {
3872 	rtap_len = qdf_align(rtap_len, 2);
3873 
3874 	/*
3875 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3876 	 * Enable all "known" he-mu radiotap flags for now
3877 	 */
3878 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3879 	rtap_len += 2;
3880 
3881 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3882 	rtap_len += 2;
3883 
3884 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3885 	rtap_len += 1;
3886 
3887 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3888 	rtap_len += 1;
3889 
3890 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3891 	rtap_len += 1;
3892 
3893 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3894 	rtap_len += 1;
3895 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3896 		  rx_status->he_flags1,
3897 		  rx_status->he_flags2, rx_status->he_RU[0],
3898 		  rx_status->he_RU[1], rx_status->he_RU[2],
3899 		  rx_status->he_RU[3]);
3900 
3901 	return rtap_len;
3902 }
3903 
3904 /**
3905  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3906  * @rx_status: Pointer to rx_status.
3907  * @rtap_buf: buffer to which radiotap has to be updated
3908  * @rtap_len: radiotap length
3909  *
 * This API updates the HE-MU-OTHER fields in the radiotap header.
 *
 * Return: updated length of the radiotap buffer.
3913  */
3914 static unsigned int
3915 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3916 				     int8_t *rtap_buf, uint32_t rtap_len)
3917 {
3918 	rtap_len = qdf_align(rtap_len, 2);
3919 
3920 	/*
3921 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3922 	 * Enable all "known" he-mu-other radiotap flags for now
3923 	 */
3924 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3925 	rtap_len += 2;
3926 
3927 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3928 	rtap_len += 2;
3929 
3930 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3931 	rtap_len += 1;
3932 
3933 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3934 	rtap_len += 1;
3935 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3936 		  rx_status->he_per_user_1,
3937 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3938 		  rx_status->he_per_user_known);
3939 	return rtap_len;
3940 }
3941 
3942 
/*
 * Maximum combined length of the radiotap header: the mandatory
 * struct ieee80211_radiotap_header plus RADIOTAP_HEADER_LEN must not
 * exceed the available headroom_sz. Increase RADIOTAP_HEADER_LEN when
 * new radiotap elements are added. The number after '+' in each macro
 * below is the maximum possible increase due to alignment padding.
 */
3950 
3951 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3952 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3953 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3954 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3955 #define RADIOTAP_FIXED_HEADER_LEN 17
3956 #define RADIOTAP_HT_FLAGS_LEN 3
3957 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3958 #define RADIOTAP_VENDOR_NS_LEN \
3959 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3960 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3961 				RADIOTAP_FIXED_HEADER_LEN + \
3962 				RADIOTAP_HT_FLAGS_LEN + \
3963 				RADIOTAP_VHT_FLAGS_LEN + \
3964 				RADIOTAP_AMPDU_STATUS_LEN + \
3965 				RADIOTAP_HE_FLAGS_LEN + \
3966 				RADIOTAP_HE_MU_FLAGS_LEN + \
3967 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
3968 				RADIOTAP_VENDOR_NS_LEN)
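
/*
 * Worked example (illustrative): with an 8-byte
 * struct ieee80211_radiotap_header this expands to
 * 8 + 17 + 3 + 13 + 11 + 13 + 9 + 19 + RADIOTAP_VENDOR_NS_LEN bytes,
 * the worst case that qdf_nbuf_update_radiotap() below sizes its local
 * buffer to and checks against the available headroom.
 */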
3969 
3970 #define IEEE80211_RADIOTAP_HE 23
3971 #define IEEE80211_RADIOTAP_HE_MU	24
3972 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3973 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
3974 
/**
 * radiotap_num_to_freq() - Get frequency from channel number
 * @chan_num: Input channel number
 *
 * Return: Channel frequency in MHz
 */
static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3982 {
3983 	if (chan_num == CHANNEL_NUM_14)
3984 		return CHANNEL_FREQ_2484;
3985 	if (chan_num < CHANNEL_NUM_14)
3986 		return CHANNEL_FREQ_2407 +
3987 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3988 
3989 	if (chan_num < CHANNEL_NUM_27)
3990 		return CHANNEL_FREQ_2512 +
3991 			((chan_num - CHANNEL_NUM_15) *
3992 			 FREQ_MULTIPLIER_CONST_20MHZ);
3993 
3994 	if (chan_num > CHANNEL_NUM_182 &&
3995 			chan_num < CHANNEL_NUM_197)
3996 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3997 			CHANNEL_FREQ_4000);
3998 
3999 	return CHANNEL_FREQ_5000 +
4000 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4001 }
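
/*
 * Worked examples (derived from the constants above):
 *	chan   1 -> 2407 +   1 * 5 = 2412 MHz
 *	chan  14 -> 2484 MHz (special case)
 *	chan  36 -> 5000 +  36 * 5 = 5180 MHz
 *	chan 184 -> 4000 + 184 * 5 = 4920 MHz (4.9 GHz band)
 */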
4002 
4003 /**
4004  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4005  * @rx_status: Pointer to rx_status.
4006  * @rtap_buf: Buf to which AMPDU info has to be updated.
4007  * @rtap_len: Current length of radiotap buffer
4008  *
 * Return: Length of the radiotap buffer after AMPDU flags are updated.
4010  */
4011 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4012 					struct mon_rx_status *rx_status,
4013 					uint8_t *rtap_buf,
4014 					uint32_t rtap_len)
4015 {
4016 	/*
4017 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4018 	 * First 32 bits of AMPDU represents the reference number
4019 	 */
4020 
4021 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4022 	uint16_t ampdu_flags = 0;
4023 	uint16_t ampdu_reserved_flags = 0;
4024 
4025 	rtap_len = qdf_align(rtap_len, 4);
4026 
4027 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4028 	rtap_len += 4;
4029 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4030 	rtap_len += 2;
4031 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4032 	rtap_len += 2;
4033 
4034 	return rtap_len;
4035 }
4036 
4037 /**
4038  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4039  * @rx_status: Pointer to rx_status.
4040  * @nbuf:      nbuf pointer to which radiotap has to be updated
4041  * @headroom_sz: Available headroom size.
4042  *
 * Return: length of the radiotap header pushed onto the nbuf, or 0 on error.
4044  */
4045 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4046 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4047 {
4048 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4049 	struct ieee80211_radiotap_header *rthdr =
4050 		(struct ieee80211_radiotap_header *)rtap_buf;
4051 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4052 	uint32_t rtap_len = rtap_hdr_len;
4053 	uint8_t length = rtap_len;
4054 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4055 
4056 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4057 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4058 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4059 	rtap_len += 8;
4060 
4061 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4062 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4063 
4064 	if (rx_status->rs_fcs_err)
4065 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4066 
4067 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4068 	rtap_len += 1;
4069 
4070 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4071 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4072 	    !rx_status->he_flags) {
4073 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4074 		rtap_buf[rtap_len] = rx_status->rate;
4075 	} else
4076 		rtap_buf[rtap_len] = 0;
4077 	rtap_len += 1;
4078 
4079 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4080 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4081 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4082 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4083 	rtap_len += 2;
4084 	/* Channel flags. */
4085 	if (rx_status->chan_num > CHANNEL_NUM_35)
4086 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4087 	else
4088 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4089 	if (rx_status->cck_flag)
4090 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4091 	if (rx_status->ofdm_flag)
4092 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4093 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4094 	rtap_len += 2;
4095 
4096 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4097 	 *					(dBm)
4098 	 */
4099 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	/*
	 * rssi_comb is in dB relative to the noise floor; add the channel
	 * noise floor (typically -96 dBm) to convert the value to dBm.
	 */
4104 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4105 	rtap_len += 1;
4106 
4107 	/* RX signal noise floor */
4108 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4109 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4110 	rtap_len += 1;
4111 
4112 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4113 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4114 	rtap_buf[rtap_len] = rx_status->nr_ant;
4115 	rtap_len += 1;
4116 
4117 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4118 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4119 		return 0;
4120 	}
4121 
4122 	if (rx_status->ht_flags) {
4123 		length = rtap_len;
		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4125 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4126 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4127 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4128 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4129 		rtap_len += 1;
4130 
4131 		if (rx_status->sgi)
4132 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4133 		if (rx_status->bw)
4134 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4135 		else
4136 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4137 		rtap_len += 1;
4138 
4139 		rtap_buf[rtap_len] = rx_status->mcs;
4140 		rtap_len += 1;
4141 
4142 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4143 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4144 			return 0;
4145 		}
4146 	}
4147 
4148 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4149 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4150 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4151 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4152 								rtap_buf,
4153 								rtap_len);
4154 	}
4155 
4156 	if (rx_status->vht_flags) {
4157 		length = rtap_len;
4158 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4159 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4160 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4161 								rtap_buf,
4162 								rtap_len);
4163 
4164 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4165 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4166 			return 0;
4167 		}
4168 	}
4169 
4170 	if (rx_status->he_flags) {
4171 		length = rtap_len;
4172 		/* IEEE80211_RADIOTAP_HE */
4173 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4174 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4175 								rtap_buf,
4176 								rtap_len);
4177 
4178 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4179 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4180 			return 0;
4181 		}
4182 	}
4183 
4184 	if (rx_status->he_mu_flags) {
4185 		length = rtap_len;
4186 		/* IEEE80211_RADIOTAP_HE-MU */
4187 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4188 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4189 								rtap_buf,
4190 								rtap_len);
4191 
4192 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4193 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4194 			return 0;
4195 		}
4196 	}
4197 
4198 	if (rx_status->he_mu_other_flags) {
4199 		length = rtap_len;
4200 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4201 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4202 		rtap_len =
4203 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4204 								rtap_buf,
4205 								rtap_len);
4206 
4207 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4208 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4209 			return 0;
4210 		}
4211 	}
4212 
4213 	rtap_len = qdf_align(rtap_len, 2);
4214 	/*
4215 	 * Radiotap Vendor Namespace
4216 	 */
4217 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4218 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4219 					(rtap_buf + rtap_len);
4220 	/*
4221 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4222 	 */
4223 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4224 	/*
4225 	 * Name space selector = 0
4226 	 * We only will have one namespace for now
4227 	 */
4228 	radiotap_vendor_ns_ath->hdr.selector = 0;
4229 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4230 					sizeof(*radiotap_vendor_ns_ath) -
4231 					sizeof(radiotap_vendor_ns_ath->hdr));
4232 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4233 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4234 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4235 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4236 				cpu_to_le32(rx_status->ppdu_timestamp);
4237 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4238 
4239 	rthdr->it_len = cpu_to_le16(rtap_len);
4240 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4241 
4242 	if (headroom_sz < rtap_len) {
4243 		qdf_err("ERROR: not enough space to update radiotap");
4244 		return 0;
4245 	}
4246 	qdf_nbuf_push_head(nbuf, rtap_len);
4247 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4248 	return rtap_len;
4249 }
4250 #else
4251 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4252 					struct mon_rx_status *rx_status,
4253 					int8_t *rtap_buf,
4254 					uint32_t rtap_len)
4255 {
4256 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4257 	return 0;
4258 }
4259 
static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				  int8_t *rtap_buf, uint32_t rtap_len)
4262 {
4263 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4264 	return 0;
4265 }
4266 
4267 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4268 					struct mon_rx_status *rx_status,
4269 					uint8_t *rtap_buf,
4270 					uint32_t rtap_len)
4271 {
4272 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4273 	return 0;
4274 }
4275 
4276 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4277 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4278 {
4279 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4280 	return 0;
4281 }
4282 #endif
4283 qdf_export_symbol(qdf_nbuf_update_radiotap);
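
/*
 * Usage sketch for qdf_nbuf_update_radiotap() (illustrative; populating
 * rx_status from the PPDU status TLVs is driver-specific and elided):
 *
 *	struct mon_rx_status rx_status = {0};
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *					    qdf_nbuf_headroom(nbuf));
 *	if (!rtap_len)
 *		return QDF_STATUS_E_FAILURE;
 *
 * A zero return means the header did not fit (or radiotap is not
 * supported on this kernel) and nothing was pushed onto the nbuf.
 */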
4284 
4285 /**
4286  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4287  * @cb_func_ptr: function pointer to the nbuf free callback
4288  *
4289  * This function registers a callback function for nbuf free.
4290  *
4291  * Return: none
4292  */
4293 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4294 {
4295 	nbuf_free_cb = cb_func_ptr;
4296 }
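
/*
 * Registration sketch (illustrative; my_nbuf_free_notify is a
 * hypothetical driver hook matching the qdf_nbuf_free_t signature):
 *
 *	static void my_nbuf_free_notify(qdf_nbuf_t nbuf)
 *	{
 *		...
 *	}
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_notify);
 */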
4297 
/**
 * qdf_nbuf_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Sets the broadcast/multicast flags and the packet type
 * (ARP/DHCP/EAPOL/WAPI) in the nbuf control block.
 *
 * Return: none
 */
4304 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4305 {
4306 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4307 
4308 	/* check destination mac address is broadcast/multicast */
4309 	if (is_broadcast_ether_addr((uint8_t *)eh))
4310 		QDF_NBUF_CB_SET_BCAST(skb);
4311 	else if (is_multicast_ether_addr((uint8_t *)eh))
4312 		QDF_NBUF_CB_SET_MCAST(skb);
4313 
4314 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4315 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4316 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4317 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4318 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4319 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4320 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4321 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4322 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4323 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4324 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4325 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4326 }
4327 qdf_export_symbol(qdf_nbuf_classify_pkt);
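
/*
 * Usage sketch (illustrative): classify once on the tx path, then branch
 * on the cached result instead of re-parsing the frame:
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		...
 *
 * e.g. to steer EAPOL frames onto a high-priority queue.
 */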
4328 
4329 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4330 {
4331 	qdf_nbuf_users_set(&nbuf->users, 1);
4332 	nbuf->data = nbuf->head + NET_SKB_PAD;
4333 	skb_reset_tail_pointer(nbuf);
4334 }
4335 qdf_export_symbol(__qdf_nbuf_init);
4336 
4337 #ifdef WLAN_FEATURE_FASTPATH
4338 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4339 {
4340 	qdf_nbuf_users_set(&nbuf->users, 1);
4341 	nbuf->data = nbuf->head + NET_SKB_PAD;
4342 	skb_reset_tail_pointer(nbuf);
4343 }
4344 qdf_export_symbol(qdf_nbuf_init_fast);
4345 #endif /* WLAN_FEATURE_FASTPATH */
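
/*
 * Usage sketch (illustrative): fastpath buffer recycling re-arms a
 * completed skb for reuse instead of freeing and reallocating it:
 *
 *	qdf_nbuf_init_fast(nbuf);
 *
 * after which users is 1 and data/tail point just past NET_SKB_PAD.
 */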
4346 
4347 
4348 #ifdef QDF_NBUF_GLOBAL_COUNT
/**
 * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
 *
 * Return: void
 */
4354 void __qdf_nbuf_mod_init(void)
4355 {
4356 	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR,
				  NULL, &nbuf_count);
4358 }
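
/*
 * The nbuf_count atomic registered above appears as a read-only debugfs
 * node; a shell sketch (the exact path depends on the qdf debugfs root
 * and the value of NBUF_DEBUGFS_NAME):
 *
 *	cat /sys/kernel/debug/<qdf root>/<NBUF_DEBUGFS_NAME>
 */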
4359 
/**
 * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
 *
 * Return: void
 */
4365 void __qdf_nbuf_mod_exit(void)
4366 {
4367 }
4368 #endif
4369