xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_types.h>
32 #include <qdf_nbuf.h>
33 #include "qdf_flex_mem.h"
34 #include <qdf_mem.h>
35 #include <qdf_status.h>
36 #include <qdf_lock.h>
37 #include <qdf_trace.h>
38 #include <qdf_debugfs.h>
39 #include <net/ieee80211_radiotap.h>
40 #include <qdf_module.h>
41 #include <qdf_atomic.h>
42 #include <pld_common.h>
43 #include <qdf_module.h>
44 #include "qdf_str.h"
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* channel number to freq conversion */
78 #define CHANNEL_NUM_14 14
79 #define CHANNEL_NUM_15 15
80 #define CHANNEL_NUM_27 27
81 #define CHANNEL_NUM_35 35
82 #define CHANNEL_NUM_182 182
83 #define CHANNEL_NUM_197 197
84 #define CHANNEL_FREQ_2484 2484
85 #define CHANNEL_FREQ_2407 2407
86 #define CHANNEL_FREQ_2512 2512
87 #define CHANNEL_FREQ_5000 5000
88 #define CHANNEL_FREQ_4000 4000
89 #define FREQ_MULTIPLIER_CONST_5MHZ 5
90 #define FREQ_MULTIPLIER_CONST_20MHZ 20
91 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
92 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
93 #define RADIOTAP_CCK_CHANNEL 0x0020
94 #define RADIOTAP_OFDM_CHANNEL 0x0040
95 
96 #ifdef CONFIG_MCL
97 #include <qdf_mc_timer.h>
98 
99 struct qdf_track_timer {
100 	qdf_mc_timer_t track_timer;
101 	qdf_atomic_t alloc_fail_cnt;
102 };
103 
104 static struct qdf_track_timer alloc_track_timer;
105 
106 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
107 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
108 #endif
109 
110 /* Packet Counter */
111 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
112 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
113 #ifdef QDF_NBUF_GLOBAL_COUNT
114 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
115 static qdf_atomic_t nbuf_count;
116 #endif
117 
118 /**
119  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
120  *
121  * Return: none
122  */
123 void qdf_nbuf_tx_desc_count_display(void)
124 {
125 	qdf_debug("Current Snapshot of the Driver:");
126 	qdf_debug("Data Packets:");
127 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
129 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
138 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
146 	qdf_debug("Mgmt Packets:");
147 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
161 }
162 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
163 
164 /**
165  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
166  * @packet_type   : packet type either mgmt/data
167  * @current_state : layer at which the packet currently present
168  *
169  * Return: none
170  */
171 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
172 			uint8_t current_state)
173 {
174 	switch (packet_type) {
175 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
176 		nbuf_tx_mgmt[current_state]++;
177 		break;
178 	case QDF_NBUF_TX_PKT_DATA_TRACK:
179 		nbuf_tx_data[current_state]++;
180 		break;
181 	default:
182 		break;
183 	}
184 }
185 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
186 
187 /**
188  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
189  *
190  * Return: none
191  */
192 void qdf_nbuf_tx_desc_count_clear(void)
193 {
194 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
195 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
196 }
197 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
198 
199 /**
200  * qdf_nbuf_set_state() - Updates the packet state
201  * @nbuf:            network buffer
202  * @current_state :  layer at which the packet currently is
203  *
204  * This function updates the packet state to the layer at which the packet
205  * currently is
206  *
207  * Return: none
208  */
209 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
210 {
211 	/*
212 	 * Only Mgmt, Data Packets are tracked. WMI messages
213 	 * such as scan commands are not tracked
214 	 */
215 	uint8_t packet_type;
216 
217 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
218 
219 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
220 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
221 		return;
222 	}
223 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
224 	qdf_nbuf_tx_desc_count_update(packet_type,
225 					current_state);
226 }
227 qdf_export_symbol(qdf_nbuf_set_state);
228 
229 #ifdef CONFIG_MCL
230 /**
231  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
232  *
233  * This function starts the alloc fail replenish timer.
234  *
235  * Return: void
236  */
237 static void __qdf_nbuf_start_replenish_timer(void)
238 {
239 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
240 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
241 	    QDF_TIMER_STATE_RUNNING)
242 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
243 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
244 }
245 
246 /**
247  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
248  *
249  * This function stops the alloc fail replenish timer.
250  *
251  * Return: void
252  */
253 static void __qdf_nbuf_stop_replenish_timer(void)
254 {
255 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
256 		return;
257 
258 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
259 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
260 	    QDF_TIMER_STATE_RUNNING)
261 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
262 }
263 
264 /**
265  * qdf_replenish_expire_handler - Replenish expire handler
266  *
267  * This function triggers when the alloc fail replenish timer expires.
268  *
269  * Return: void
270  */
271 static void qdf_replenish_expire_handler(void *arg)
272 {
273 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
274 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
275 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
276 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
277 
278 		/* Error handling here */
279 	}
280 }
281 
282 /**
283  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
284  *
285  * This function initializes the nbuf alloc fail replenish timer.
286  *
287  * Return: void
288  */
289 void __qdf_nbuf_init_replenish_timer(void)
290 {
291 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
292 			  qdf_replenish_expire_handler, NULL);
293 }
294 
295 /**
296  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
297  *
298  * This function deinitializes the nbuf alloc fail replenish timer.
299  *
300  * Return: void
301  */
302 void __qdf_nbuf_deinit_replenish_timer(void)
303 {
304 	__qdf_nbuf_stop_replenish_timer();
305 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
306 }
307 #else
308 
309 static inline void __qdf_nbuf_start_replenish_timer(void) {}
310 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
311 #endif
312 
313 /* globals do not need to be initialized to NULL/0 */
314 qdf_nbuf_trace_update_t qdf_trace_update_cb;
315 qdf_nbuf_free_t nbuf_free_cb;
316 
317 #ifdef QDF_NBUF_GLOBAL_COUNT
318 
319 /**
320  * __qdf_nbuf_count_get() - get nbuf global count
321  *
322  * Return: nbuf global count
323  */
324 int __qdf_nbuf_count_get(void)
325 {
326 	return qdf_atomic_read(&nbuf_count);
327 }
328 qdf_export_symbol(__qdf_nbuf_count_get);
329 
330 /**
331  * __qdf_nbuf_count_inc() - increment nbuf global count
332  *
333  * @buf: sk buff
334  *
335  * Return: void
336  */
337 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
338 {
339 	qdf_atomic_inc(&nbuf_count);
340 }
341 qdf_export_symbol(__qdf_nbuf_count_inc);
342 
343 /**
344  * __qdf_nbuf_count_dec() - decrement nbuf global count
345  *
346  * @buf: sk buff
347  *
348  * Return: void
349  */
350 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
351 {
352 	qdf_atomic_dec(&nbuf_count);
353 }
354 qdf_export_symbol(__qdf_nbuf_count_dec);
355 #endif
356 
357 #if defined(QCA_WIFI_QCA8074) && defined (BUILD_X86)
358 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
359 				 int align, int prio, const char *func,
360 				 uint32_t line)
361 {
362 	struct sk_buff *skb;
363 	unsigned long offset;
364 	uint32_t lowmem_alloc_tries = 0;
365 
366 	if (align)
367 		size += (align - 1);
368 
369 realloc:
370 	skb = dev_alloc_skb(size);
371 
372 	if (skb)
373 		goto skb_alloc;
374 
375 	skb = pld_nbuf_pre_alloc(size);
376 
377 	if (!skb) {
378 		qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
379 			     size, func, line);
380 		return NULL;
381 	}
382 
383 skb_alloc:
384 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
385 	 * Though we are trying to reserve low memory upfront to prevent this,
386 	 * we sometimes see SKBs allocated from low memory.
387 	 */
388 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
389 		lowmem_alloc_tries++;
390 		if (lowmem_alloc_tries > 100) {
391 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
392 				     size, func, line);
393 			return NULL;
394 		} else {
395 			/* Not freeing to make sure it
396 			 * will not get allocated again
397 			 */
398 			goto realloc;
399 		}
400 	}
401 	memset(skb->cb, 0x0, sizeof(skb->cb));
402 
403 	/*
404 	 * The default is for netbuf fragments to be interpreted
405 	 * as wordstreams rather than bytestreams.
406 	 */
407 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
408 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
409 
410 	/*
411 	 * XXX:how about we reserve first then align
412 	 * Align & make sure that the tail & data are adjusted properly
413 	 */
414 
415 	if (align) {
416 		offset = ((unsigned long)skb->data) % align;
417 		if (offset)
418 			skb_reserve(skb, align - offset);
419 	}
420 
421 	/*
422 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
423 	 * pointer
424 	 */
425 	skb_reserve(skb, reserve);
426 	qdf_nbuf_count_inc(skb);
427 
428 	return skb;
429 }
430 #else
431 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
432 				 int align, int prio, const char *func,
433 				 uint32_t line)
434 {
435 	struct sk_buff *skb;
436 	unsigned long offset;
437 	int flags = GFP_KERNEL;
438 
439 	if (align)
440 		size += (align - 1);
441 
442 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
443 		flags = GFP_ATOMIC;
444 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
445 		/*
446 		 * Observed that kcompactd burns out CPU to make order-3 page.
447 		 *__netdev_alloc_skb has 4k page fallback option just in case of
448 		 * failing high order page allocation so we don't need to be
449 		 * hard. Make kcompactd rest in piece.
450 		 */
451 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
452 #endif
453 	}
454 
455 	skb = __netdev_alloc_skb(NULL, size, flags);
456 
457 	if (skb)
458 		goto skb_alloc;
459 
460 	skb = pld_nbuf_pre_alloc(size);
461 
462 	if (!skb) {
463 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
464 				size, func, line);
465 		__qdf_nbuf_start_replenish_timer();
466 		return NULL;
467 	} else {
468 		__qdf_nbuf_stop_replenish_timer();
469 	}
470 
471 skb_alloc:
472 	memset(skb->cb, 0x0, sizeof(skb->cb));
473 
474 	/*
475 	 * The default is for netbuf fragments to be interpreted
476 	 * as wordstreams rather than bytestreams.
477 	 */
478 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
479 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
480 
481 	/*
482 	 * XXX:how about we reserve first then align
483 	 * Align & make sure that the tail & data are adjusted properly
484 	 */
485 
486 	if (align) {
487 		offset = ((unsigned long)skb->data) % align;
488 		if (offset)
489 			skb_reserve(skb, align - offset);
490 	}
491 
492 	/*
493 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
494 	 * pointer
495 	 */
496 	skb_reserve(skb, reserve);
497 	qdf_nbuf_count_inc(skb);
498 
499 	return skb;
500 }
501 #endif
502 qdf_export_symbol(__qdf_nbuf_alloc);
503 
504 /**
505  * __qdf_nbuf_free() - free the nbuf its interrupt safe
506  * @skb: Pointer to network buffer
507  *
508  * Return: none
509  */
510 
511 #ifdef CONFIG_MCL
512 void __qdf_nbuf_free(struct sk_buff *skb)
513 {
514 	if (pld_nbuf_pre_alloc_free(skb))
515 		return;
516 
517 	qdf_nbuf_count_dec(skb);
518 	if (nbuf_free_cb)
519 		nbuf_free_cb(skb);
520 	else
521 		dev_kfree_skb_any(skb);
522 }
523 #else
524 void __qdf_nbuf_free(struct sk_buff *skb)
525 {
526 	if (pld_nbuf_pre_alloc_free(skb))
527 		return;
528 
529 	qdf_nbuf_count_dec(skb);
530 	dev_kfree_skb_any(skb);
531 }
532 #endif
533 
534 qdf_export_symbol(__qdf_nbuf_free);
535 
536 #ifdef NBUF_MEMORY_DEBUG
537 enum qdf_nbuf_event_type {
538 	QDF_NBUF_ALLOC,
539 	QDF_NBUF_ALLOC_FAILURE,
540 	QDF_NBUF_FREE,
541 	QDF_NBUF_MAP,
542 	QDF_NBUF_UNMAP,
543 };
544 
545 struct qdf_nbuf_event {
546 	qdf_nbuf_t nbuf;
547 	char file[QDF_MEM_FILE_NAME_SIZE];
548 	uint32_t line;
549 	enum qdf_nbuf_event_type type;
550 	uint64_t timestamp;
551 };
552 
553 #define QDF_NBUF_HISTORY_SIZE 4096
554 static qdf_atomic_t qdf_nbuf_history_index;
555 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
556 
557 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
558 {
559 	int32_t next = qdf_atomic_inc_return(index);
560 
561 	if (next == size)
562 		qdf_atomic_sub(size, index);
563 
564 	return next % size;
565 }
566 
567 static void
568 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
569 		     enum qdf_nbuf_event_type type)
570 {
571 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
572 						   QDF_NBUF_HISTORY_SIZE);
573 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
574 
575 	event->nbuf = nbuf;
576 	qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
577 	event->line = line;
578 	event->type = type;
579 	event->timestamp = qdf_get_log_timestamp();
580 }
581 #endif /* NBUF_MEMORY_DEBUG */
582 
583 #ifdef NBUF_MAP_UNMAP_DEBUG
584 struct qdf_nbuf_map_metadata {
585 	struct hlist_node node;
586 	qdf_nbuf_t nbuf;
587 	char file[QDF_MEM_FILE_NAME_SIZE];
588 	uint32_t line;
589 };
590 
591 DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
592 			 sizeof(struct qdf_nbuf_map_metadata), 0);
593 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
594 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
595 static qdf_spinlock_t qdf_nbuf_map_lock;
596 
597 static void qdf_nbuf_map_tracking_init(void)
598 {
599 	qdf_flex_mem_init(&qdf_nbuf_map_pool);
600 	hash_init(qdf_nbuf_map_ht);
601 	qdf_spinlock_create(&qdf_nbuf_map_lock);
602 }
603 
604 void qdf_nbuf_map_check_for_leaks(void)
605 {
606 	struct qdf_nbuf_map_metadata *meta;
607 	int bucket;
608 	uint32_t count = 0;
609 	bool is_empty;
610 
611 	qdf_flex_mem_release(&qdf_nbuf_map_pool);
612 
613 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
614 	is_empty = hash_empty(qdf_nbuf_map_ht);
615 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
616 
617 	if (is_empty)
618 		return;
619 
620 	qdf_err("Nbuf map without unmap events detected!");
621 	qdf_err("------------------------------------------------------------");
622 
623 	/* Hold the lock for the entire iteration for safe list/meta access. We
624 	 * are explicitly preferring the chance to watchdog on the print, over
625 	 * the posibility of invalid list/memory access. Since we are going to
626 	 * panic anyway, the worst case is loading up the crash dump to find out
627 	 * what was in the hash table.
628 	 */
629 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
630 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
631 		count++;
632 		qdf_err("0x%pk @ %s:%u",
633 			meta->nbuf, meta->file, meta->line);
634 	}
635 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
636 
637 	panic("%u fatal nbuf map without unmap events detected!", count);
638 }
639 
640 static void qdf_nbuf_map_tracking_deinit(void)
641 {
642 	qdf_nbuf_map_check_for_leaks();
643 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
644 	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
645 }
646 
647 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
648 {
649 	struct qdf_nbuf_map_metadata *meta;
650 
651 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
652 		if (meta->nbuf == nbuf)
653 			return meta;
654 	}
655 
656 	return NULL;
657 }
658 
659 static QDF_STATUS
660 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
661 {
662 	struct qdf_nbuf_map_metadata *meta;
663 
664 	QDF_BUG(nbuf);
665 	if (!nbuf) {
666 		qdf_err("Cannot map null nbuf");
667 		return QDF_STATUS_E_INVAL;
668 	}
669 
670 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
671 	meta = qdf_nbuf_meta_get(nbuf);
672 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
673 	if (meta)
674 		QDF_DEBUG_PANIC(
675 			"Double nbuf map detected @ %s:%u; last map from %s:%u",
676 			kbasename(file), line, meta->file, meta->line);
677 
678 	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
679 	if (!meta) {
680 		qdf_err("Failed to allocate nbuf map tracking metadata");
681 		return QDF_STATUS_E_NOMEM;
682 	}
683 
684 	meta->nbuf = nbuf;
685 	qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
686 	meta->line = line;
687 
688 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
689 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
690 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
691 
692 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
693 
694 	return QDF_STATUS_SUCCESS;
695 }
696 
697 static void
698 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
699 {
700 	struct qdf_nbuf_map_metadata *meta;
701 
702 	QDF_BUG(nbuf);
703 	if (!nbuf) {
704 		qdf_err("Cannot unmap null nbuf");
705 		return;
706 	}
707 
708 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
709 	meta = qdf_nbuf_meta_get(nbuf);
710 
711 	if (!meta)
712 		QDF_DEBUG_PANIC(
713 		      "Double nbuf unmap or unmap without map detected @ %s:%u",
714 		      kbasename(file), line);
715 
716 	hash_del(&meta->node);
717 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
718 
719 	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
720 
721 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
722 }
723 
724 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
725 			      qdf_nbuf_t buf,
726 			      qdf_dma_dir_t dir,
727 			      const char *file,
728 			      uint32_t line)
729 {
730 	QDF_STATUS status;
731 
732 	status = qdf_nbuf_track_map(buf, file, line);
733 	if (QDF_IS_STATUS_ERROR(status))
734 		return status;
735 
736 	status = __qdf_nbuf_map(osdev, buf, dir);
737 	if (QDF_IS_STATUS_ERROR(status))
738 		qdf_nbuf_untrack_map(buf, file, line);
739 
740 	return status;
741 }
742 
743 qdf_export_symbol(qdf_nbuf_map_debug);
744 
745 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
746 			  qdf_nbuf_t buf,
747 			  qdf_dma_dir_t dir,
748 			  const char *file,
749 			  uint32_t line)
750 {
751 	qdf_nbuf_untrack_map(buf, file, line);
752 	__qdf_nbuf_unmap_single(osdev, buf, dir);
753 }
754 
755 qdf_export_symbol(qdf_nbuf_unmap_debug);
756 
757 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
758 				     qdf_nbuf_t buf,
759 				     qdf_dma_dir_t dir,
760 				     const char *file,
761 				     uint32_t line)
762 {
763 	QDF_STATUS status;
764 
765 	status = qdf_nbuf_track_map(buf, file, line);
766 	if (QDF_IS_STATUS_ERROR(status))
767 		return status;
768 
769 	status = __qdf_nbuf_map_single(osdev, buf, dir);
770 	if (QDF_IS_STATUS_ERROR(status))
771 		qdf_nbuf_untrack_map(buf, file, line);
772 
773 	return status;
774 }
775 
776 qdf_export_symbol(qdf_nbuf_map_single_debug);
777 
778 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
779 				 qdf_nbuf_t buf,
780 				 qdf_dma_dir_t dir,
781 				 const char *file,
782 				 uint32_t line)
783 {
784 	qdf_nbuf_untrack_map(buf, file, line);
785 	__qdf_nbuf_unmap_single(osdev, buf, dir);
786 }
787 
788 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
789 
790 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
791 				     qdf_nbuf_t buf,
792 				     qdf_dma_dir_t dir,
793 				     int nbytes,
794 				     const char *file,
795 				     uint32_t line)
796 {
797 	QDF_STATUS status;
798 
799 	status = qdf_nbuf_track_map(buf, file, line);
800 	if (QDF_IS_STATUS_ERROR(status))
801 		return status;
802 
803 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
804 	if (QDF_IS_STATUS_ERROR(status))
805 		qdf_nbuf_untrack_map(buf, file, line);
806 
807 	return status;
808 }
809 
810 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
811 
812 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
813 				 qdf_nbuf_t buf,
814 				 qdf_dma_dir_t dir,
815 				 int nbytes,
816 				 const char *file,
817 				 uint32_t line)
818 {
819 	qdf_nbuf_untrack_map(buf, file, line);
820 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
821 }
822 
823 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
824 
825 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
826 					    qdf_nbuf_t buf,
827 					    qdf_dma_dir_t dir,
828 					    int nbytes,
829 					    const char *file,
830 					    uint32_t line)
831 {
832 	QDF_STATUS status;
833 
834 	status = qdf_nbuf_track_map(buf, file, line);
835 	if (QDF_IS_STATUS_ERROR(status))
836 		return status;
837 
838 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
839 	if (QDF_IS_STATUS_ERROR(status))
840 		qdf_nbuf_untrack_map(buf, file, line);
841 
842 	return status;
843 }
844 
845 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
846 
847 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
848 					qdf_nbuf_t buf,
849 					qdf_dma_dir_t dir,
850 					int nbytes,
851 					const char *file,
852 					uint32_t line)
853 {
854 	qdf_nbuf_untrack_map(buf, file, line);
855 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
856 }
857 
858 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
859 
860 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, uint8_t *file,
861 					     uint32_t line)
862 {
863 	struct qdf_nbuf_map_metadata *meta;
864 
865 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
866 	meta = qdf_nbuf_meta_get(nbuf);
867 	if (meta)
868 		QDF_DEBUG_PANIC(
869 			"Nbuf freed @ %s:%u while mapped from %s:%u",
870 			kbasename(file), line, meta->file, meta->line);
871 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
872 }
873 #else
874 static inline void qdf_nbuf_map_tracking_init(void)
875 {
876 }
877 
878 static inline void qdf_nbuf_map_tracking_deinit(void)
879 {
880 }
881 
882 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
883 						    uint8_t *file,
884 						    uint32_t line)
885 {
886 }
887 #endif /* NBUF_MAP_UNMAP_DEBUG */
888 
889 /**
890  * __qdf_nbuf_map() - map a buffer to local bus address space
891  * @osdev: OS device
892  * @bmap: Bitmap
893  * @skb: Pointer to network buffer
894  * @dir: Direction
895  *
896  * Return: QDF_STATUS
897  */
898 #ifdef QDF_OS_DEBUG
899 QDF_STATUS
900 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
901 {
902 	struct skb_shared_info *sh = skb_shinfo(skb);
903 
904 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
905 			|| (dir == QDF_DMA_FROM_DEVICE));
906 
907 	/*
908 	 * Assume there's only a single fragment.
909 	 * To support multiple fragments, it would be necessary to change
910 	 * qdf_nbuf_t to be a separate object that stores meta-info
911 	 * (including the bus address for each fragment) and a pointer
912 	 * to the underlying sk_buff.
913 	 */
914 	qdf_assert(sh->nr_frags == 0);
915 
916 	return __qdf_nbuf_map_single(osdev, skb, dir);
917 }
918 qdf_export_symbol(__qdf_nbuf_map);
919 
920 #else
921 QDF_STATUS
922 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
923 {
924 	return __qdf_nbuf_map_single(osdev, skb, dir);
925 }
926 qdf_export_symbol(__qdf_nbuf_map);
927 #endif
928 /**
929  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
930  * @osdev: OS device
931  * @skb: Pointer to network buffer
932  * @dir: dma direction
933  *
934  * Return: none
935  */
936 void
937 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
938 			qdf_dma_dir_t dir)
939 {
940 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
941 		   || (dir == QDF_DMA_FROM_DEVICE));
942 
943 	/*
944 	 * Assume there's a single fragment.
945 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
946 	 */
947 	__qdf_nbuf_unmap_single(osdev, skb, dir);
948 }
949 qdf_export_symbol(__qdf_nbuf_unmap);
950 
951 /**
952  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
953  * @osdev: OS device
954  * @skb: Pointer to network buffer
955  * @dir: Direction
956  *
957  * Return: QDF_STATUS
958  */
959 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
960 QDF_STATUS
961 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
962 {
963 	qdf_dma_addr_t paddr;
964 
965 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
966 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
967 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
968 	return QDF_STATUS_SUCCESS;
969 }
970 qdf_export_symbol(__qdf_nbuf_map_single);
971 #else
972 QDF_STATUS
973 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
974 {
975 	qdf_dma_addr_t paddr;
976 
977 	/* assume that the OS only provides a single fragment */
978 	QDF_NBUF_CB_PADDR(buf) = paddr =
979 		dma_map_single(osdev->dev, buf->data,
980 				skb_end_pointer(buf) - buf->data,
981 				__qdf_dma_dir_to_os(dir));
982 	return dma_mapping_error(osdev->dev, paddr)
983 		? QDF_STATUS_E_FAILURE
984 		: QDF_STATUS_SUCCESS;
985 }
986 qdf_export_symbol(__qdf_nbuf_map_single);
987 #endif
988 /**
989  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
990  * @osdev: OS device
991  * @skb: Pointer to network buffer
992  * @dir: Direction
993  *
994  * Return: none
995  */
996 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
997 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
998 				qdf_dma_dir_t dir)
999 {
1000 }
1001 #else
1002 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1003 					qdf_dma_dir_t dir)
1004 {
1005 	if (QDF_NBUF_CB_PADDR(buf))
1006 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1007 			skb_end_pointer(buf) - buf->data,
1008 			__qdf_dma_dir_to_os(dir));
1009 }
1010 #endif
1011 qdf_export_symbol(__qdf_nbuf_unmap_single);
1012 
1013 /**
1014  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1015  * @skb: Pointer to network buffer
1016  * @cksum: Pointer to checksum value
1017  *
1018  * Return: QDF_STATUS
1019  */
1020 QDF_STATUS
1021 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1022 {
1023 	switch (cksum->l4_result) {
1024 	case QDF_NBUF_RX_CKSUM_NONE:
1025 		skb->ip_summed = CHECKSUM_NONE;
1026 		break;
1027 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1028 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1029 		break;
1030 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1031 		skb->ip_summed = CHECKSUM_PARTIAL;
1032 		skb->csum = cksum->val;
1033 		break;
1034 	default:
1035 		pr_err("Unknown checksum type\n");
1036 		qdf_assert(0);
1037 		return QDF_STATUS_E_NOSUPPORT;
1038 	}
1039 	return QDF_STATUS_SUCCESS;
1040 }
1041 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1042 
1043 /**
1044  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1045  * @skb: Pointer to network buffer
1046  *
1047  * Return: TX checksum value
1048  */
1049 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1050 {
1051 	switch (skb->ip_summed) {
1052 	case CHECKSUM_NONE:
1053 		return QDF_NBUF_TX_CKSUM_NONE;
1054 	case CHECKSUM_PARTIAL:
1055 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1056 	case CHECKSUM_COMPLETE:
1057 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1058 	default:
1059 		return QDF_NBUF_TX_CKSUM_NONE;
1060 	}
1061 }
1062 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1063 
1064 /**
1065  * __qdf_nbuf_get_tid() - get tid
1066  * @skb: Pointer to network buffer
1067  *
1068  * Return: tid
1069  */
1070 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1071 {
1072 	return skb->priority;
1073 }
1074 qdf_export_symbol(__qdf_nbuf_get_tid);
1075 
1076 /**
1077  * __qdf_nbuf_set_tid() - set tid
1078  * @skb: Pointer to network buffer
1079  *
1080  * Return: none
1081  */
1082 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1083 {
1084 	skb->priority = tid;
1085 }
1086 qdf_export_symbol(__qdf_nbuf_set_tid);
1087 
1088 /**
1089  * __qdf_nbuf_set_tid() - set tid
1090  * @skb: Pointer to network buffer
1091  *
1092  * Return: none
1093  */
1094 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1095 {
1096 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1097 }
1098 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1099 
1100 /**
1101  * __qdf_nbuf_reg_trace_cb() - register trace callback
1102  * @cb_func_ptr: Pointer to trace callback function
1103  *
1104  * Return: none
1105  */
1106 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1107 {
1108 	qdf_trace_update_cb = cb_func_ptr;
1109 }
1110 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1111 
1112 /**
1113  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1114  *              of DHCP packet.
1115  * @data: Pointer to DHCP packet data buffer
1116  *
1117  * This func. returns the subtype of DHCP packet.
1118  *
1119  * Return: subtype of the DHCP packet.
1120  */
1121 enum qdf_proto_subtype
1122 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1123 {
1124 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1125 
1126 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1127 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1128 					QDF_DHCP_OPTION53_LENGTH)) {
1129 
1130 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1131 		case QDF_DHCP_DISCOVER:
1132 			subtype = QDF_PROTO_DHCP_DISCOVER;
1133 			break;
1134 		case QDF_DHCP_REQUEST:
1135 			subtype = QDF_PROTO_DHCP_REQUEST;
1136 			break;
1137 		case QDF_DHCP_OFFER:
1138 			subtype = QDF_PROTO_DHCP_OFFER;
1139 			break;
1140 		case QDF_DHCP_ACK:
1141 			subtype = QDF_PROTO_DHCP_ACK;
1142 			break;
1143 		case QDF_DHCP_NAK:
1144 			subtype = QDF_PROTO_DHCP_NACK;
1145 			break;
1146 		case QDF_DHCP_RELEASE:
1147 			subtype = QDF_PROTO_DHCP_RELEASE;
1148 			break;
1149 		case QDF_DHCP_INFORM:
1150 			subtype = QDF_PROTO_DHCP_INFORM;
1151 			break;
1152 		case QDF_DHCP_DECLINE:
1153 			subtype = QDF_PROTO_DHCP_DECLINE;
1154 			break;
1155 		default:
1156 			break;
1157 		}
1158 	}
1159 
1160 	return subtype;
1161 }
1162 
1163 /**
1164  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1165  *            of EAPOL packet.
1166  * @data: Pointer to EAPOL packet data buffer
1167  *
1168  * This func. returns the subtype of EAPOL packet.
1169  *
1170  * Return: subtype of the EAPOL packet.
1171  */
1172 enum qdf_proto_subtype
1173 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1174 {
1175 	uint16_t eapol_key_info;
1176 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1177 	uint16_t mask;
1178 
1179 	eapol_key_info = (uint16_t)(*(uint16_t *)
1180 			(data + EAPOL_KEY_INFO_OFFSET));
1181 
1182 	mask = eapol_key_info & EAPOL_MASK;
1183 	switch (mask) {
1184 	case EAPOL_M1_BIT_MASK:
1185 		subtype = QDF_PROTO_EAPOL_M1;
1186 		break;
1187 	case EAPOL_M2_BIT_MASK:
1188 		subtype = QDF_PROTO_EAPOL_M2;
1189 		break;
1190 	case EAPOL_M3_BIT_MASK:
1191 		subtype = QDF_PROTO_EAPOL_M3;
1192 		break;
1193 	case EAPOL_M4_BIT_MASK:
1194 		subtype = QDF_PROTO_EAPOL_M4;
1195 		break;
1196 	default:
1197 		break;
1198 	}
1199 
1200 	return subtype;
1201 }
1202 
1203 /**
1204  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1205  *            of ARP packet.
1206  * @data: Pointer to ARP packet data buffer
1207  *
1208  * This func. returns the subtype of ARP packet.
1209  *
1210  * Return: subtype of the ARP packet.
1211  */
1212 enum qdf_proto_subtype
1213 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1214 {
1215 	uint16_t subtype;
1216 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1217 
1218 	subtype = (uint16_t)(*(uint16_t *)
1219 			(data + ARP_SUB_TYPE_OFFSET));
1220 
1221 	switch (QDF_SWAP_U16(subtype)) {
1222 	case ARP_REQUEST:
1223 		proto_subtype = QDF_PROTO_ARP_REQ;
1224 		break;
1225 	case ARP_RESPONSE:
1226 		proto_subtype = QDF_PROTO_ARP_RES;
1227 		break;
1228 	default:
1229 		break;
1230 	}
1231 
1232 	return proto_subtype;
1233 }
1234 
1235 /**
1236  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1237  *            of IPV4 ICMP packet.
1238  * @data: Pointer to IPV4 ICMP packet data buffer
1239  *
1240  * This func. returns the subtype of ICMP packet.
1241  *
1242  * Return: subtype of the ICMP packet.
1243  */
1244 enum qdf_proto_subtype
1245 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1246 {
1247 	uint8_t subtype;
1248 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1249 
1250 	subtype = (uint8_t)(*(uint8_t *)
1251 			(data + ICMP_SUBTYPE_OFFSET));
1252 
1253 	switch (subtype) {
1254 	case ICMP_REQUEST:
1255 		proto_subtype = QDF_PROTO_ICMP_REQ;
1256 		break;
1257 	case ICMP_RESPONSE:
1258 		proto_subtype = QDF_PROTO_ICMP_RES;
1259 		break;
1260 	default:
1261 		break;
1262 	}
1263 
1264 	return proto_subtype;
1265 }
1266 
1267 /**
1268  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1269  *            of IPV6 ICMPV6 packet.
1270  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1271  *
1272  * This func. returns the subtype of ICMPV6 packet.
1273  *
1274  * Return: subtype of the ICMPV6 packet.
1275  */
1276 enum qdf_proto_subtype
1277 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1278 {
1279 	uint8_t subtype;
1280 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1281 
1282 	subtype = (uint8_t)(*(uint8_t *)
1283 			(data + ICMPV6_SUBTYPE_OFFSET));
1284 
1285 	switch (subtype) {
1286 	case ICMPV6_REQUEST:
1287 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1288 		break;
1289 	case ICMPV6_RESPONSE:
1290 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1291 		break;
1292 	case ICMPV6_RS:
1293 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1294 		break;
1295 	case ICMPV6_RA:
1296 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1297 		break;
1298 	case ICMPV6_NS:
1299 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1300 		break;
1301 	case ICMPV6_NA:
1302 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1303 		break;
1304 	default:
1305 		break;
1306 	}
1307 
1308 	return proto_subtype;
1309 }
1310 
1311 /**
1312  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1313  *            of IPV4 packet.
1314  * @data: Pointer to IPV4 packet data buffer
1315  *
1316  * This func. returns the proto type of IPV4 packet.
1317  *
1318  * Return: proto type of IPV4 packet.
1319  */
1320 uint8_t
1321 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1322 {
1323 	uint8_t proto_type;
1324 
1325 	proto_type = (uint8_t)(*(uint8_t *)(data +
1326 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1327 	return proto_type;
1328 }
1329 
1330 /**
1331  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1332  *            of IPV6 packet.
1333  * @data: Pointer to IPV6 packet data buffer
1334  *
1335  * This func. returns the proto type of IPV6 packet.
1336  *
1337  * Return: proto type of IPV6 packet.
1338  */
1339 uint8_t
1340 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1341 {
1342 	uint8_t proto_type;
1343 
1344 	proto_type = (uint8_t)(*(uint8_t *)(data +
1345 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1346 	return proto_type;
1347 }
1348 
1349 /**
1350  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1351  * @data: Pointer to network data
1352  *
1353  * This api is for Tx packets.
1354  *
1355  * Return: true if packet is ipv4 packet
1356  *	   false otherwise
1357  */
1358 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1359 {
1360 	uint16_t ether_type;
1361 
1362 	ether_type = (uint16_t)(*(uint16_t *)(data +
1363 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1364 
1365 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1366 		return true;
1367 	else
1368 		return false;
1369 }
1370 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1371 
1372 /**
1373  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1374  * @data: Pointer to network data buffer
1375  *
1376  * This api is for ipv4 packet.
1377  *
1378  * Return: true if packet is DHCP packet
1379  *	   false otherwise
1380  */
1381 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1382 {
1383 	uint16_t sport;
1384 	uint16_t dport;
1385 
1386 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1387 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1388 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1389 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1390 					 sizeof(uint16_t)));
1391 
1392 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1393 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1394 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1395 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1396 		return true;
1397 	else
1398 		return false;
1399 }
1400 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1401 
1402 /**
1403  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1404  * @data: Pointer to network data buffer
1405  *
1406  * This api is for ipv4 packet.
1407  *
1408  * Return: true if packet is EAPOL packet
1409  *	   false otherwise.
1410  */
1411 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1412 {
1413 	uint16_t ether_type;
1414 
1415 	ether_type = (uint16_t)(*(uint16_t *)(data +
1416 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1417 
1418 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1419 		return true;
1420 	else
1421 		return false;
1422 }
1423 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1424 
1425 /**
1426  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1427  * @skb: Pointer to network buffer
1428  *
1429  * This api is for ipv4 packet.
1430  *
1431  * Return: true if packet is WAPI packet
1432  *	   false otherwise.
1433  */
1434 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1435 {
1436 	uint16_t ether_type;
1437 
1438 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1439 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1440 
1441 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1442 		return true;
1443 	else
1444 		return false;
1445 }
1446 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1447 
1448 /**
1449  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1450  * @skb: Pointer to network buffer
1451  *
1452  * This api is for ipv4 packet.
1453  *
1454  * Return: true if packet is tdls packet
1455  *	   false otherwise.
1456  */
1457 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1458 {
1459 	uint16_t ether_type;
1460 
1461 	ether_type = *(uint16_t *)(skb->data +
1462 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1463 
1464 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1465 		return true;
1466 	else
1467 		return false;
1468 }
1469 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1470 
1471 /**
1472  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1473  * @data: Pointer to network data buffer
1474  *
1475  * This api is for ipv4 packet.
1476  *
1477  * Return: true if packet is ARP packet
1478  *	   false otherwise.
1479  */
1480 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1481 {
1482 	uint16_t ether_type;
1483 
1484 	ether_type = (uint16_t)(*(uint16_t *)(data +
1485 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1486 
1487 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1488 		return true;
1489 	else
1490 		return false;
1491 }
1492 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1493 
1494 /**
1495  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1496  * @data: Pointer to network data buffer
1497  *
1498  * This api is for ipv4 packet.
1499  *
1500  * Return: true if packet is ARP request
1501  *	   false otherwise.
1502  */
1503 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1504 {
1505 	uint16_t op_code;
1506 
1507 	op_code = (uint16_t)(*(uint16_t *)(data +
1508 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1509 
1510 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1511 		return true;
1512 	return false;
1513 }
1514 
1515 /**
1516  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1517  * @data: Pointer to network data buffer
1518  *
1519  * This api is for ipv4 packet.
1520  *
1521  * Return: true if packet is ARP response
1522  *	   false otherwise.
1523  */
1524 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1525 {
1526 	uint16_t op_code;
1527 
1528 	op_code = (uint16_t)(*(uint16_t *)(data +
1529 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1530 
1531 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1532 		return true;
1533 	return false;
1534 }
1535 
1536 /**
1537  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1538  * @data: Pointer to network data buffer
1539  *
1540  * This api is for ipv4 packet.
1541  *
1542  * Return: ARP packet source IP value.
1543  */
1544 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1545 {
1546 	uint32_t src_ip;
1547 
1548 	src_ip = (uint32_t)(*(uint32_t *)(data +
1549 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1550 
1551 	return src_ip;
1552 }
1553 
1554 /**
1555  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1556  * @data: Pointer to network data buffer
1557  *
1558  * This api is for ipv4 packet.
1559  *
1560  * Return: ARP packet target IP value.
1561  */
1562 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1563 {
1564 	uint32_t tgt_ip;
1565 
1566 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1567 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1568 
1569 	return tgt_ip;
1570 }
1571 
1572 /**
1573  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1574  * @data: Pointer to network data buffer
1575  * @len: length to copy
1576  *
1577  * This api is for dns domain name
1578  *
1579  * Return: dns domain name.
1580  */
1581 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1582 {
1583 	uint8_t *domain_name;
1584 
1585 	domain_name = (uint8_t *)
1586 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1587 	return domain_name;
1588 }
1589 
1590 
1591 /**
1592  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1593  * @data: Pointer to network data buffer
1594  *
1595  * This api is for dns query packet.
1596  *
1597  * Return: true if packet is dns query packet.
1598  *	   false otherwise.
1599  */
1600 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1601 {
1602 	uint16_t op_code;
1603 	uint16_t tgt_port;
1604 
1605 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1606 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1607 	/* Standard DNS query always happen on Dest Port 53. */
1608 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1609 		op_code = (uint16_t)(*(uint16_t *)(data +
1610 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1611 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1612 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1613 			return true;
1614 	}
1615 	return false;
1616 }
1617 
1618 /**
1619  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1620  * @data: Pointer to network data buffer
1621  *
1622  * This api is for dns query response.
1623  *
1624  * Return: true if packet is dns response packet.
1625  *	   false otherwise.
1626  */
1627 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1628 {
1629 	uint16_t op_code;
1630 	uint16_t src_port;
1631 
1632 	src_port = (uint16_t)(*(uint16_t *)(data +
1633 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1634 	/* Standard DNS response always comes on Src Port 53. */
1635 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1636 		op_code = (uint16_t)(*(uint16_t *)(data +
1637 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1638 
1639 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1640 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1641 			return true;
1642 	}
1643 	return false;
1644 }
1645 
1646 /**
1647  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1648  * @data: Pointer to network data buffer
1649  *
1650  * This api is for tcp syn packet.
1651  *
1652  * Return: true if packet is tcp syn packet.
1653  *	   false otherwise.
1654  */
1655 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1656 {
1657 	uint8_t op_code;
1658 
1659 	op_code = (uint8_t)(*(uint8_t *)(data +
1660 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1661 
1662 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1663 		return true;
1664 	return false;
1665 }
1666 
1667 /**
1668  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1669  * @data: Pointer to network data buffer
1670  *
1671  * This api is for tcp syn ack packet.
1672  *
1673  * Return: true if packet is tcp syn ack packet.
1674  *	   false otherwise.
1675  */
1676 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1677 {
1678 	uint8_t op_code;
1679 
1680 	op_code = (uint8_t)(*(uint8_t *)(data +
1681 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1682 
1683 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1684 		return true;
1685 	return false;
1686 }
1687 
1688 /**
1689  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1690  * @data: Pointer to network data buffer
1691  *
1692  * This api is for tcp ack packet.
1693  *
1694  * Return: true if packet is tcp ack packet.
1695  *	   false otherwise.
1696  */
1697 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1698 {
1699 	uint8_t op_code;
1700 
1701 	op_code = (uint8_t)(*(uint8_t *)(data +
1702 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1703 
1704 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1705 		return true;
1706 	return false;
1707 }
1708 
1709 /**
1710  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1711  * @data: Pointer to network data buffer
1712  *
1713  * This api is for tcp packet.
1714  *
1715  * Return: tcp source port value.
1716  */
1717 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1718 {
1719 	uint16_t src_port;
1720 
1721 	src_port = (uint16_t)(*(uint16_t *)(data +
1722 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1723 
1724 	return src_port;
1725 }
1726 
1727 /**
1728  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1729  * @data: Pointer to network data buffer
1730  *
1731  * This api is for tcp packet.
1732  *
1733  * Return: tcp destination port value.
1734  */
1735 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1736 {
1737 	uint16_t tgt_port;
1738 
1739 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1740 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1741 
1742 	return tgt_port;
1743 }
1744 
1745 /**
1746  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1747  * @data: Pointer to network data buffer
1748  *
1749  * This api is for ipv4 req packet.
1750  *
1751  * Return: true if packet is icmpv4 request
1752  *	   false otherwise.
1753  */
1754 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1755 {
1756 	uint8_t op_code;
1757 
1758 	op_code = (uint8_t)(*(uint8_t *)(data +
1759 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1760 
1761 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1762 		return true;
1763 	return false;
1764 }
1765 
1766 /**
1767  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1768  * @data: Pointer to network data buffer
1769  *
1770  * This api is for ipv4 res packet.
1771  *
1772  * Return: true if packet is icmpv4 response
1773  *	   false otherwise.
1774  */
1775 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1776 {
1777 	uint8_t op_code;
1778 
1779 	op_code = (uint8_t)(*(uint8_t *)(data +
1780 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1781 
1782 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1783 		return true;
1784 	return false;
1785 }
1786 
1787 /**
1788  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1789  * @data: Pointer to network data buffer
1790  *
1791  * This api is for ipv4 packet.
1792  *
1793  * Return: icmpv4 packet source IP value.
1794  */
1795 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1796 {
1797 	uint32_t src_ip;
1798 
1799 	src_ip = (uint32_t)(*(uint32_t *)(data +
1800 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1801 
1802 	return src_ip;
1803 }
1804 
1805 /**
1806  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1807  * @data: Pointer to network data buffer
1808  *
1809  * This api is for ipv4 packet.
1810  *
1811  * Return: icmpv4 packet target IP value.
1812  */
1813 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1814 {
1815 	uint32_t tgt_ip;
1816 
1817 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1818 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1819 
1820 	return tgt_ip;
1821 }
1822 
1823 
1824 /**
1825  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1826  * @data: Pointer to IPV6 packet data buffer
1827  *
1828  * This func. checks whether it is a IPV6 packet or not.
1829  *
1830  * Return: TRUE if it is a IPV6 packet
1831  *         FALSE if not
1832  */
1833 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1834 {
1835 	uint16_t ether_type;
1836 
1837 	ether_type = (uint16_t)(*(uint16_t *)(data +
1838 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1839 
1840 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1841 		return true;
1842 	else
1843 		return false;
1844 }
1845 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1846 
1847 /**
1848  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1849  * @data: Pointer to network data buffer
1850  *
1851  * This api is for ipv6 packet.
1852  *
1853  * Return: true if packet is DHCP packet
1854  *	   false otherwise
1855  */
1856 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1857 {
1858 	uint16_t sport;
1859 	uint16_t dport;
1860 
1861 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1862 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1863 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1864 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1865 					sizeof(uint16_t));
1866 
1867 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1868 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1869 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1870 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1871 		return true;
1872 	else
1873 		return false;
1874 }
1875 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1876 
1877 /**
1878  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1879  * @data: Pointer to IPV4 packet data buffer
1880  *
1881  * This func. checks whether it is a IPV4 multicast packet or not.
1882  *
1883  * Return: TRUE if it is a IPV4 multicast packet
1884  *         FALSE if not
1885  */
1886 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1887 {
1888 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1889 		uint32_t *dst_addr =
1890 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1891 
1892 		/*
1893 		 * Check first word of the IPV4 address and if it is
1894 		 * equal to 0xE then it represents multicast IP.
1895 		 */
1896 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1897 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1898 			return true;
1899 		else
1900 			return false;
1901 	} else
1902 		return false;
1903 }
1904 
1905 /**
1906  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1907  * @data: Pointer to IPV6 packet data buffer
1908  *
1909  * This func. checks whether it is a IPV6 multicast packet or not.
1910  *
1911  * Return: TRUE if it is a IPV6 multicast packet
1912  *         FALSE if not
1913  */
1914 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1915 {
1916 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1917 		uint16_t *dst_addr;
1918 
1919 		dst_addr = (uint16_t *)
1920 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1921 
1922 		/*
1923 		 * Check first byte of the IP address and if it
1924 		 * 0xFF00 then it is a IPV6 mcast packet.
1925 		 */
1926 		if (*dst_addr ==
1927 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1928 			return true;
1929 		else
1930 			return false;
1931 	} else
1932 		return false;
1933 }
1934 
1935 /**
1936  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1937  * @data: Pointer to IPV4 ICMP packet data buffer
1938  *
1939  * This func. checks whether it is a ICMP packet or not.
1940  *
1941  * Return: TRUE if it is a ICMP packet
1942  *         FALSE if not
1943  */
1944 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1945 {
1946 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1947 		uint8_t pkt_type;
1948 
1949 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1950 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1951 
1952 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1953 			return true;
1954 		else
1955 			return false;
1956 	} else
1957 		return false;
1958 }
1959 
1960 /**
1961  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1962  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1963  *
1964  * This func. checks whether it is a ICMPV6 packet or not.
1965  *
1966  * Return: TRUE if it is a ICMPV6 packet
1967  *         FALSE if not
1968  */
1969 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1970 {
1971 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1972 		uint8_t pkt_type;
1973 
1974 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1975 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1976 
1977 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1978 			return true;
1979 		else
1980 			return false;
1981 	} else
1982 		return false;
1983 }
1984 
1985 /**
1986  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1987  * @data: Pointer to IPV4 UDP packet data buffer
1988  *
1989  * This func. checks whether it is a IPV4 UDP packet or not.
1990  *
1991  * Return: TRUE if it is a IPV4 UDP packet
1992  *         FALSE if not
1993  */
1994 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1995 {
1996 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1997 		uint8_t pkt_type;
1998 
1999 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2000 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2001 
2002 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2003 			return true;
2004 		else
2005 			return false;
2006 	} else
2007 		return false;
2008 }
2009 
2010 /**
2011  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2012  * @data: Pointer to IPV4 TCP packet data buffer
2013  *
2014  * This func. checks whether it is a IPV4 TCP packet or not.
2015  *
2016  * Return: TRUE if it is a IPV4 TCP packet
2017  *         FALSE if not
2018  */
2019 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2020 {
2021 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2022 		uint8_t pkt_type;
2023 
2024 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2025 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2026 
2027 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2028 			return true;
2029 		else
2030 			return false;
2031 	} else
2032 		return false;
2033 }
2034 
2035 /**
2036  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2037  * @data: Pointer to IPV6 UDP packet data buffer
2038  *
2039  * This func. checks whether it is a IPV6 UDP packet or not.
2040  *
2041  * Return: TRUE if it is a IPV6 UDP packet
2042  *         FALSE if not
2043  */
2044 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2045 {
2046 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2047 		uint8_t pkt_type;
2048 
2049 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2050 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2051 
2052 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2053 			return true;
2054 		else
2055 			return false;
2056 	} else
2057 		return false;
2058 }
2059 
2060 /**
2061  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2062  * @data: Pointer to IPV6 TCP packet data buffer
2063  *
2064  * This func. checks whether it is a IPV6 TCP packet or not.
2065  *
2066  * Return: TRUE if it is a IPV6 TCP packet
2067  *         FALSE if not
2068  */
2069 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2070 {
2071 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2072 		uint8_t pkt_type;
2073 
2074 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2075 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2076 
2077 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2078 			return true;
2079 		else
2080 			return false;
2081 	} else
2082 		return false;
2083 }
2084 
2085 /**
2086  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2087  * @nbuf - sk buff
2088  *
2089  * Return: true if packet is broadcast
2090  *	   false otherwise
2091  */
2092 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2093 {
2094 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2095 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2096 }
2097 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2098 
2099 #ifdef NBUF_MEMORY_DEBUG
2100 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2101 
2102 /**
2103  * struct qdf_nbuf_track_t - Network buffer track structure
2104  *
2105  * @p_next: Pointer to next
2106  * @net_buf: Pointer to network buffer
2107  * @file_name: File name
2108  * @line_num: Line number
2109  * @size: Size
2110  */
2111 struct qdf_nbuf_track_t {
2112 	struct qdf_nbuf_track_t *p_next;
2113 	qdf_nbuf_t net_buf;
2114 	char file_name[QDF_MEM_FILE_NAME_SIZE];
2115 	uint32_t line_num;
2116 	size_t size;
2117 };
2118 
2119 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2120 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2121 
2122 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2123 static struct kmem_cache *nbuf_tracking_cache;
2124 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2125 static spinlock_t qdf_net_buf_track_free_list_lock;
2126 static uint32_t qdf_net_buf_track_free_list_count;
2127 static uint32_t qdf_net_buf_track_used_list_count;
2128 static uint32_t qdf_net_buf_track_max_used;
2129 static uint32_t qdf_net_buf_track_max_free;
2130 static uint32_t qdf_net_buf_track_max_allocated;
2131 
2132 /**
2133  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2134  *
2135  * tracks the max number of network buffers that the wlan driver was tracking
2136  * at any one time.
2137  *
2138  * Return: none
2139  */
2140 static inline void update_max_used(void)
2141 {
2142 	int sum;
2143 
2144 	if (qdf_net_buf_track_max_used <
2145 	    qdf_net_buf_track_used_list_count)
2146 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2147 	sum = qdf_net_buf_track_free_list_count +
2148 		qdf_net_buf_track_used_list_count;
2149 	if (qdf_net_buf_track_max_allocated < sum)
2150 		qdf_net_buf_track_max_allocated = sum;
2151 }
2152 
2153 /**
2154  * update_max_free() - update qdf_net_buf_track_free_list_count
2155  *
2156  * tracks the max number tracking buffers kept in the freelist.
2157  *
2158  * Return: none
2159  */
2160 static inline void update_max_free(void)
2161 {
2162 	if (qdf_net_buf_track_max_free <
2163 	    qdf_net_buf_track_free_list_count)
2164 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2165 }
2166 
2167 /**
2168  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2169  *
2170  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2171  * This function also ads fexibility to adjust the allocation and freelist
2172  * scheems.
2173  *
2174  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2175  */
2176 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2177 {
2178 	int flags = GFP_KERNEL;
2179 	unsigned long irq_flag;
2180 	QDF_NBUF_TRACK *new_node = NULL;
2181 
2182 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2183 	qdf_net_buf_track_used_list_count++;
2184 	if (qdf_net_buf_track_free_list != NULL) {
2185 		new_node = qdf_net_buf_track_free_list;
2186 		qdf_net_buf_track_free_list =
2187 			qdf_net_buf_track_free_list->p_next;
2188 		qdf_net_buf_track_free_list_count--;
2189 	}
2190 	update_max_used();
2191 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2192 
2193 	if (new_node != NULL)
2194 		return new_node;
2195 
2196 	if (in_interrupt() || irqs_disabled() || in_atomic())
2197 		flags = GFP_ATOMIC;
2198 
2199 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2200 }
2201 
2202 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2203 #define FREEQ_POOLSIZE 2048
2204 
2205 /**
2206  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2207  *
2208  * Matches calls to qdf_nbuf_track_alloc.
2209  * Either frees the tracking cookie to kernel or an internal
2210  * freelist based on the size of the freelist.
2211  *
2212  * Return: none
2213  */
2214 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2215 {
2216 	unsigned long irq_flag;
2217 
2218 	if (!node)
2219 		return;
2220 
2221 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2222 	 * only shrink the freelist if it is bigger than twice the number of
2223 	 * nbufs in use. If the driver is stalling in a consistent bursty
2224 	 * fasion, this will keep 3/4 of thee allocations from the free list
2225 	 * while also allowing the system to recover memory as less frantic
2226 	 * traffic occurs.
2227 	 */
2228 
2229 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2230 
2231 	qdf_net_buf_track_used_list_count--;
2232 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2233 	   (qdf_net_buf_track_free_list_count >
2234 	    qdf_net_buf_track_used_list_count << 1)) {
2235 		kmem_cache_free(nbuf_tracking_cache, node);
2236 	} else {
2237 		node->p_next = qdf_net_buf_track_free_list;
2238 		qdf_net_buf_track_free_list = node;
2239 		qdf_net_buf_track_free_list_count++;
2240 	}
2241 	update_max_free();
2242 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2243 }
2244 
2245 /**
2246  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2247  *
2248  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2249  * the freelist first makes it performant for the first iperf udp burst
2250  * as well as steady state.
2251  *
2252  * Return: None
2253  */
2254 static void qdf_nbuf_track_prefill(void)
2255 {
2256 	int i;
2257 	QDF_NBUF_TRACK *node, *head;
2258 
2259 	/* prepopulate the freelist */
2260 	head = NULL;
2261 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2262 		node = qdf_nbuf_track_alloc();
2263 		if (node == NULL)
2264 			continue;
2265 		node->p_next = head;
2266 		head = node;
2267 	}
2268 	while (head) {
2269 		node = head->p_next;
2270 		qdf_nbuf_track_free(head);
2271 		head = node;
2272 	}
2273 
2274 	/* prefilled buffers should not count as used */
2275 	qdf_net_buf_track_max_used = 0;
2276 }
2277 
2278 /**
2279  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2280  *
2281  * This initializes the memory manager for the nbuf tracking cookies.  Because
2282  * these cookies are all the same size and only used in this feature, we can
2283  * use a kmem_cache to provide tracking as well as to speed up allocations.
2284  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2285  * features) a freelist is prepopulated here.
2286  *
2287  * Return: None
2288  */
2289 static void qdf_nbuf_track_memory_manager_create(void)
2290 {
2291 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2292 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2293 						sizeof(QDF_NBUF_TRACK),
2294 						0, 0, NULL);
2295 
2296 	qdf_nbuf_track_prefill();
2297 }
2298 
2299 /**
2300  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2301  *
2302  * Empty the freelist and print out usage statistics when it is no longer
2303  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2304  * any nbuf tracking cookies were leaked.
2305  *
2306  * Return: None
2307  */
2308 static void qdf_nbuf_track_memory_manager_destroy(void)
2309 {
2310 	QDF_NBUF_TRACK *node, *tmp;
2311 	unsigned long irq_flag;
2312 
2313 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2314 	node = qdf_net_buf_track_free_list;
2315 
2316 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2317 		qdf_print("%s: unexpectedly large max_used count %d",
2318 			  __func__, qdf_net_buf_track_max_used);
2319 
2320 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2321 		qdf_print("%s: %d unused trackers were allocated",
2322 			  __func__,
2323 			  qdf_net_buf_track_max_allocated -
2324 			  qdf_net_buf_track_max_used);
2325 
2326 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2327 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2328 		qdf_print("%s: check freelist shrinking functionality",
2329 			  __func__);
2330 
2331 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2332 		  "%s: %d residual freelist size",
2333 		  __func__, qdf_net_buf_track_free_list_count);
2334 
2335 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2336 		  "%s: %d max freelist size observed",
2337 		  __func__, qdf_net_buf_track_max_free);
2338 
2339 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2340 		  "%s: %d max buffers used observed",
2341 		  __func__, qdf_net_buf_track_max_used);
2342 
2343 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2344 		  "%s: %d max buffers allocated observed",
2345 		  __func__, qdf_net_buf_track_max_allocated);
2346 
2347 	while (node) {
2348 		tmp = node;
2349 		node = node->p_next;
2350 		kmem_cache_free(nbuf_tracking_cache, tmp);
2351 		qdf_net_buf_track_free_list_count--;
2352 	}
2353 
2354 	if (qdf_net_buf_track_free_list_count != 0)
2355 		qdf_info("%d unfreed tracking memory lost in freelist",
2356 			 qdf_net_buf_track_free_list_count);
2357 
2358 	if (qdf_net_buf_track_used_list_count != 0)
2359 		qdf_info("%d unfreed tracking memory still in use",
2360 			 qdf_net_buf_track_used_list_count);
2361 
2362 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2363 	kmem_cache_destroy(nbuf_tracking_cache);
2364 	qdf_net_buf_track_free_list = NULL;
2365 }
2366 
2367 /**
2368  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2369  *
2370  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2371  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2372  * WLAN driver module whose allocated SKB is freed by network stack are
2373  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2374  * reported as memory leak.
2375  *
2376  * Return: none
2377  */
2378 void qdf_net_buf_debug_init(void)
2379 {
2380 	uint32_t i;
2381 
2382 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2383 
2384 	qdf_nbuf_map_tracking_init();
2385 	qdf_nbuf_track_memory_manager_create();
2386 
2387 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2388 		gp_qdf_net_buf_track_tbl[i] = NULL;
2389 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2390 	}
2391 }
2392 qdf_export_symbol(qdf_net_buf_debug_init);
2393 
2394 /**
2395  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2396  *
2397  * Exit network buffer tracking debug functionality and log SKB memory leaks
2398  * As part of exiting the functionality, free the leaked memory and
2399  * cleanup the tracking buffers.
2400  *
2401  * Return: none
2402  */
2403 void qdf_net_buf_debug_exit(void)
2404 {
2405 	uint32_t i;
2406 	uint32_t count = 0;
2407 	unsigned long irq_flag;
2408 	QDF_NBUF_TRACK *p_node;
2409 	QDF_NBUF_TRACK *p_prev;
2410 
2411 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2412 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2413 		p_node = gp_qdf_net_buf_track_tbl[i];
2414 		while (p_node) {
2415 			p_prev = p_node;
2416 			p_node = p_node->p_next;
2417 			count++;
2418 			qdf_info("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK",
2419 				 p_prev->file_name, p_prev->line_num,
2420 				 p_prev->size, p_prev->net_buf);
2421 			qdf_nbuf_track_free(p_prev);
2422 		}
2423 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2424 	}
2425 
2426 	qdf_nbuf_track_memory_manager_destroy();
2427 	qdf_nbuf_map_tracking_deinit();
2428 
2429 #ifdef CONFIG_HALT_KMEMLEAK
2430 	if (count) {
2431 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2432 		QDF_BUG(0);
2433 	}
2434 #endif
2435 }
2436 qdf_export_symbol(qdf_net_buf_debug_exit);
2437 
2438 /**
2439  * qdf_net_buf_debug_hash() - hash network buffer pointer
2440  *
2441  * Return: hash value
2442  */
2443 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2444 {
2445 	uint32_t i;
2446 
2447 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2448 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2449 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2450 
2451 	return i;
2452 }
2453 
2454 /**
2455  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2456  *
2457  * Return: If skb is found in hash table then return pointer to network buffer
2458  *	else return %NULL
2459  */
2460 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2461 {
2462 	uint32_t i;
2463 	QDF_NBUF_TRACK *p_node;
2464 
2465 	i = qdf_net_buf_debug_hash(net_buf);
2466 	p_node = gp_qdf_net_buf_track_tbl[i];
2467 
2468 	while (p_node) {
2469 		if (p_node->net_buf == net_buf)
2470 			return p_node;
2471 		p_node = p_node->p_next;
2472 	}
2473 
2474 	return NULL;
2475 }
2476 
2477 /**
2478  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2479  *
2480  * Return: none
2481  */
2482 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2483 				uint8_t *file_name, uint32_t line_num)
2484 {
2485 	uint32_t i;
2486 	unsigned long irq_flag;
2487 	QDF_NBUF_TRACK *p_node;
2488 	QDF_NBUF_TRACK *new_node;
2489 
2490 	new_node = qdf_nbuf_track_alloc();
2491 
2492 	i = qdf_net_buf_debug_hash(net_buf);
2493 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2494 
2495 	p_node = qdf_net_buf_debug_look_up(net_buf);
2496 
2497 	if (p_node) {
2498 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2499 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2500 			  net_buf, kbasename(file_name), line_num);
2501 		qdf_nbuf_track_free(new_node);
2502 	} else {
2503 		p_node = new_node;
2504 		if (p_node) {
2505 			p_node->net_buf = net_buf;
2506 			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2507 				      QDF_MEM_FILE_NAME_SIZE);
2508 			p_node->line_num = line_num;
2509 			p_node->size = size;
2510 			qdf_mem_skb_inc(size);
2511 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2512 			gp_qdf_net_buf_track_tbl[i] = p_node;
2513 		} else
2514 			qdf_print(
2515 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2516 				  kbasename(file_name), line_num, size);
2517 	}
2518 
2519 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2520 }
2521 qdf_export_symbol(qdf_net_buf_debug_add_node);
2522 
2523 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, uint8_t *file_name,
2524 				   uint32_t line_num)
2525 {
2526 	uint32_t i;
2527 	unsigned long irq_flag;
2528 	QDF_NBUF_TRACK *p_node;
2529 
2530 	i = qdf_net_buf_debug_hash(net_buf);
2531 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2532 
2533 	p_node = qdf_net_buf_debug_look_up(net_buf);
2534 
2535 	if (p_node) {
2536 		qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2537 			      QDF_MEM_FILE_NAME_SIZE);
2538 		p_node->line_num = line_num;
2539 	}
2540 
2541 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2542 }
2543 
2544 qdf_export_symbol(qdf_net_buf_debug_update_node);
2545 
2546 /**
2547  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2548  *
2549  * Return: none
2550  */
2551 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2552 {
2553 	uint32_t i;
2554 	QDF_NBUF_TRACK *p_head;
2555 	QDF_NBUF_TRACK *p_node = NULL;
2556 	unsigned long irq_flag;
2557 	QDF_NBUF_TRACK *p_prev;
2558 
2559 	i = qdf_net_buf_debug_hash(net_buf);
2560 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2561 
2562 	p_head = gp_qdf_net_buf_track_tbl[i];
2563 
2564 	/* Unallocated SKB */
2565 	if (!p_head)
2566 		goto done;
2567 
2568 	p_node = p_head;
2569 	/* Found at head of the table */
2570 	if (p_head->net_buf == net_buf) {
2571 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2572 		goto done;
2573 	}
2574 
2575 	/* Search in collision list */
2576 	while (p_node) {
2577 		p_prev = p_node;
2578 		p_node = p_node->p_next;
2579 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2580 			p_prev->p_next = p_node->p_next;
2581 			break;
2582 		}
2583 	}
2584 
2585 done:
2586 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2587 
2588 	if (p_node) {
2589 		qdf_mem_skb_dec(p_node->size);
2590 		qdf_nbuf_track_free(p_node);
2591 	} else {
2592 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2593 			  net_buf);
2594 		QDF_BUG(0);
2595 	}
2596 }
2597 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2598 
2599 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2600 			uint8_t *file_name, uint32_t line_num)
2601 {
2602 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2603 
2604 	while (ext_list) {
2605 		/*
2606 		 * Take care to add if it is Jumbo packet connected using
2607 		 * frag_list
2608 		 */
2609 		qdf_nbuf_t next;
2610 
2611 		next = qdf_nbuf_queue_next(ext_list);
2612 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2613 		ext_list = next;
2614 	}
2615 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2616 }
2617 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2618 
2619 /**
2620  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2621  * @net_buf: Network buf holding head segment (single)
2622  *
2623  * WLAN driver module whose allocated SKB is freed by network stack are
2624  * suppose to call this API before returning SKB to network stack such
2625  * that the SKB is not reported as memory leak.
2626  *
2627  * Return: none
2628  */
2629 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2630 {
2631 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2632 
2633 	while (ext_list) {
2634 		/*
2635 		 * Take care to free if it is Jumbo packet connected using
2636 		 * frag_list
2637 		 */
2638 		qdf_nbuf_t next;
2639 
2640 		next = qdf_nbuf_queue_next(ext_list);
2641 
2642 		if (qdf_nbuf_is_tso(ext_list) &&
2643 			qdf_nbuf_get_users(ext_list) > 1) {
2644 			ext_list = next;
2645 			continue;
2646 		}
2647 
2648 		qdf_net_buf_debug_delete_node(ext_list);
2649 		ext_list = next;
2650 	}
2651 
2652 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2653 		return;
2654 
2655 	qdf_net_buf_debug_delete_node(net_buf);
2656 }
2657 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2658 
2659 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2660 				int reserve, int align, int prio,
2661 				uint8_t *file, uint32_t line)
2662 {
2663 	qdf_nbuf_t nbuf;
2664 
2665 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, file, line);
2666 
2667 	/* Store SKB in internal QDF tracking table */
2668 	if (qdf_likely(nbuf)) {
2669 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2670 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2671 	} else {
2672 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC_FAILURE);
2673 	}
2674 
2675 	return nbuf;
2676 }
2677 qdf_export_symbol(qdf_nbuf_alloc_debug);
2678 
2679 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2680 {
2681 	if (qdf_unlikely(!nbuf))
2682 		return;
2683 
2684 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2685 		goto free_buf;
2686 
2687 	/* Remove SKB from internal QDF tracking table */
2688 	qdf_nbuf_panic_on_free_if_mapped(nbuf, file, line);
2689 	qdf_net_buf_debug_delete_node(nbuf);
2690 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2691 
2692 free_buf:
2693 	__qdf_nbuf_free(nbuf);
2694 }
2695 qdf_export_symbol(qdf_nbuf_free_debug);
2696 
2697 #endif /* NBUF_MEMORY_DEBUG */
2698 
2699 #if defined(FEATURE_TSO)
2700 
2701 /**
2702  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2703  *
2704  * @ethproto: ethernet type of the msdu
2705  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2706  * @l2_len: L2 length for the msdu
2707  * @eit_hdr: pointer to EIT header
2708  * @eit_hdr_len: EIT header length for the msdu
2709  * @eit_hdr_dma_map_addr: dma addr for EIT header
2710  * @tcphdr: pointer to tcp header
2711  * @ipv4_csum_en: ipv4 checksum enable
2712  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2713  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2714  * @ip_id: IP id
2715  * @tcp_seq_num: TCP sequence number
2716  *
2717  * This structure holds the TSO common info that is common
2718  * across all the TCP segments of the jumbo packet.
2719  */
2720 struct qdf_tso_cmn_seg_info_t {
2721 	uint16_t ethproto;
2722 	uint16_t ip_tcp_hdr_len;
2723 	uint16_t l2_len;
2724 	uint8_t *eit_hdr;
2725 	uint32_t eit_hdr_len;
2726 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2727 	struct tcphdr *tcphdr;
2728 	uint16_t ipv4_csum_en;
2729 	uint16_t tcp_ipv4_csum_en;
2730 	uint16_t tcp_ipv6_csum_en;
2731 	uint16_t ip_id;
2732 	uint32_t tcp_seq_num;
2733 };
2734 
2735 /**
2736  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2737  * information
2738  * @osdev: qdf device handle
2739  * @skb: skb buffer
2740  * @tso_info: Parameters common to all segements
2741  *
2742  * Get the TSO information that is common across all the TCP
2743  * segments of the jumbo packet
2744  *
2745  * Return: 0 - success 1 - failure
2746  */
2747 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2748 			struct sk_buff *skb,
2749 			struct qdf_tso_cmn_seg_info_t *tso_info)
2750 {
2751 	/* Get ethernet type and ethernet header length */
2752 	tso_info->ethproto = vlan_get_protocol(skb);
2753 
2754 	/* Determine whether this is an IPv4 or IPv6 packet */
2755 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2756 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2757 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2758 
2759 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2760 		tso_info->ipv4_csum_en = 1;
2761 		tso_info->tcp_ipv4_csum_en = 1;
2762 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2763 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2764 				ipv4_hdr->protocol);
2765 			return 1;
2766 		}
2767 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2768 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2769 		tso_info->tcp_ipv6_csum_en = 1;
2770 	} else {
2771 		qdf_err("TSO: ethertype 0x%x is not supported!",
2772 			tso_info->ethproto);
2773 		return 1;
2774 	}
2775 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2776 	tso_info->tcphdr = tcp_hdr(skb);
2777 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2778 	/* get pointer to the ethernet + IP + TCP header and their length */
2779 	tso_info->eit_hdr = skb->data;
2780 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2781 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2782 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2783 							tso_info->eit_hdr,
2784 							tso_info->eit_hdr_len,
2785 							DMA_TO_DEVICE);
2786 	if (unlikely(dma_mapping_error(osdev->dev,
2787 				       tso_info->eit_hdr_dma_map_addr))) {
2788 		qdf_err("DMA mapping error!");
2789 		qdf_assert(0);
2790 		return 1;
2791 	}
2792 
2793 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2794 		/* inlcude IPv4 header length for IPV4 (total length) */
2795 		tso_info->ip_tcp_hdr_len =
2796 			tso_info->eit_hdr_len - tso_info->l2_len;
2797 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2798 		/* exclude IPv6 header length for IPv6 (payload length) */
2799 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2800 	}
2801 	/*
2802 	 * The length of the payload (application layer data) is added to
2803 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2804 	 * descriptor.
2805 	 */
2806 
2807 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2808 		tso_info->tcp_seq_num,
2809 		tso_info->eit_hdr_len,
2810 		tso_info->l2_len,
2811 		skb->len);
2812 	return 0;
2813 }
2814 
2815 
2816 /**
2817  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2818  *
2819  * @curr_seg: Segment whose contents are initialized
2820  * @tso_cmn_info: Parameters common to all segements
2821  *
2822  * Return: None
2823  */
2824 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2825 				struct qdf_tso_seg_elem_t *curr_seg,
2826 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2827 {
2828 	/* Initialize the flags to 0 */
2829 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2830 
2831 	/*
2832 	 * The following fields remain the same across all segments of
2833 	 * a jumbo packet
2834 	 */
2835 	curr_seg->seg.tso_flags.tso_enable = 1;
2836 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2837 		tso_cmn_info->ipv4_csum_en;
2838 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2839 		tso_cmn_info->tcp_ipv6_csum_en;
2840 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2841 		tso_cmn_info->tcp_ipv4_csum_en;
2842 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2843 
2844 	/* The following fields change for the segments */
2845 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2846 	tso_cmn_info->ip_id++;
2847 
2848 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2849 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2850 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2851 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2852 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2853 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2854 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2855 
2856 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2857 
2858 	/*
2859 	 * First fragment for each segment always contains the ethernet,
2860 	 * IP and TCP header
2861 	 */
2862 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2863 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2864 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2865 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2866 
2867 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2868 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2869 		   tso_cmn_info->eit_hdr_len,
2870 		   curr_seg->seg.tso_flags.tcp_seq_num,
2871 		   curr_seg->seg.total_len);
2872 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2873 }
2874 
2875 /**
2876  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2877  * into segments
2878  * @nbuf: network buffer to be segmented
2879  * @tso_info: This is the output. The information about the
2880  *           TSO segments will be populated within this.
2881  *
2882  * This function fragments a TCP jumbo packet into smaller
2883  * segments to be transmitted by the driver. It chains the TSO
2884  * segments created into a list.
2885  *
2886  * Return: number of TSO segments
2887  */
2888 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2889 		struct qdf_tso_info_t *tso_info)
2890 {
2891 	/* common across all segments */
2892 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2893 	/* segment specific */
2894 	void *tso_frag_vaddr;
2895 	qdf_dma_addr_t tso_frag_paddr = 0;
2896 	uint32_t num_seg = 0;
2897 	struct qdf_tso_seg_elem_t *curr_seg;
2898 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2899 	struct skb_frag_struct *frag = NULL;
2900 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2901 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2902 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2903 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2904 	int j = 0; /* skb fragment index */
2905 
2906 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2907 
2908 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2909 						skb, &tso_cmn_info))) {
2910 		qdf_warn("TSO: error getting common segment info");
2911 		return 0;
2912 	}
2913 
2914 	total_num_seg = tso_info->tso_num_seg_list;
2915 	curr_seg = tso_info->tso_seg_list;
2916 
2917 	/* length of the first chunk of data in the skb */
2918 	skb_frag_len = skb_headlen(skb);
2919 
2920 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2921 	/* update the remaining skb fragment length and TSO segment length */
2922 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2923 	skb_proc -= tso_cmn_info.eit_hdr_len;
2924 
2925 	/* get the address to the next tso fragment */
2926 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2927 	/* get the length of the next tso fragment */
2928 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2929 
2930 	if (tso_frag_len != 0) {
2931 		tso_frag_paddr = dma_map_single(osdev->dev,
2932 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2933 	}
2934 
2935 	if (unlikely(dma_mapping_error(osdev->dev,
2936 					tso_frag_paddr))) {
2937 		qdf_err("DMA mapping error!");
2938 		qdf_assert(0);
2939 		return 0;
2940 	}
2941 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2942 		__LINE__, skb_frag_len, tso_frag_len);
2943 	num_seg = tso_info->num_segs;
2944 	tso_info->num_segs = 0;
2945 	tso_info->is_tso = 1;
2946 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2947 
2948 	while (num_seg && curr_seg) {
2949 		int i = 1; /* tso fragment index */
2950 		uint8_t more_tso_frags = 1;
2951 
2952 		curr_seg->seg.num_frags = 0;
2953 		tso_info->num_segs++;
2954 		total_num_seg->num_seg.tso_cmn_num_seg++;
2955 
2956 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2957 						 &tso_cmn_info);
2958 
2959 		if (unlikely(skb_proc == 0))
2960 			return tso_info->num_segs;
2961 
2962 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2963 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2964 		/* frag len is added to ip_len in while loop below*/
2965 
2966 		curr_seg->seg.num_frags++;
2967 
2968 		while (more_tso_frags) {
2969 			if (tso_frag_len != 0) {
2970 				curr_seg->seg.tso_frags[i].vaddr =
2971 					tso_frag_vaddr;
2972 				curr_seg->seg.tso_frags[i].length =
2973 					tso_frag_len;
2974 				curr_seg->seg.total_len += tso_frag_len;
2975 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2976 				curr_seg->seg.num_frags++;
2977 				skb_proc = skb_proc - tso_frag_len;
2978 
2979 				/* increment the TCP sequence number */
2980 
2981 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2982 				curr_seg->seg.tso_frags[i].paddr =
2983 					tso_frag_paddr;
2984 			}
2985 
2986 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2987 					__func__, __LINE__,
2988 					i,
2989 					tso_frag_len,
2990 					curr_seg->seg.total_len,
2991 					curr_seg->seg.tso_frags[i].vaddr);
2992 
2993 			/* if there is no more data left in the skb */
2994 			if (!skb_proc)
2995 				return tso_info->num_segs;
2996 
2997 			/* get the next payload fragment information */
2998 			/* check if there are more fragments in this segment */
2999 			if (tso_frag_len < tso_seg_size) {
3000 				more_tso_frags = 1;
3001 				if (tso_frag_len != 0) {
3002 					tso_seg_size = tso_seg_size -
3003 						tso_frag_len;
3004 					i++;
3005 					if (curr_seg->seg.num_frags ==
3006 								FRAG_NUM_MAX) {
3007 						more_tso_frags = 0;
3008 						/*
3009 						 * reset i and the tso
3010 						 * payload size
3011 						 */
3012 						i = 1;
3013 						tso_seg_size =
3014 							skb_shinfo(skb)->
3015 								gso_size;
3016 					}
3017 				}
3018 			} else {
3019 				more_tso_frags = 0;
3020 				/* reset i and the tso payload size */
3021 				i = 1;
3022 				tso_seg_size = skb_shinfo(skb)->gso_size;
3023 			}
3024 
3025 			/* if the next fragment is contiguous */
3026 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3027 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3028 				skb_frag_len = skb_frag_len - tso_frag_len;
3029 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3030 
3031 			} else { /* the next fragment is not contiguous */
3032 				if (skb_shinfo(skb)->nr_frags == 0) {
3033 					qdf_info("TSO: nr_frags == 0!");
3034 					qdf_assert(0);
3035 					return 0;
3036 				}
3037 				if (j >= skb_shinfo(skb)->nr_frags) {
3038 					qdf_info("TSO: nr_frags %d j %d",
3039 						 skb_shinfo(skb)->nr_frags, j);
3040 					qdf_assert(0);
3041 					return 0;
3042 				}
3043 				frag = &skb_shinfo(skb)->frags[j];
3044 				skb_frag_len = skb_frag_size(frag);
3045 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3046 				tso_frag_vaddr = skb_frag_address_safe(frag);
3047 				j++;
3048 			}
3049 
3050 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3051 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3052 				tso_seg_size);
3053 
3054 			if (!(tso_frag_vaddr)) {
3055 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3056 						__func__);
3057 				return 0;
3058 			}
3059 
3060 			tso_frag_paddr =
3061 					 dma_map_single(osdev->dev,
3062 						 tso_frag_vaddr,
3063 						 tso_frag_len,
3064 						 DMA_TO_DEVICE);
3065 			if (unlikely(dma_mapping_error(osdev->dev,
3066 							tso_frag_paddr))) {
3067 				qdf_err("DMA mapping error!");
3068 				qdf_assert(0);
3069 				return 0;
3070 			}
3071 		}
3072 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3073 				curr_seg->seg.tso_flags.tcp_seq_num);
3074 		num_seg--;
3075 		/* if TCP FIN flag was set, set it in the last segment */
3076 		if (!num_seg)
3077 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3078 
3079 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3080 		curr_seg = curr_seg->next;
3081 	}
3082 	return tso_info->num_segs;
3083 }
3084 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3085 
3086 /**
3087  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3088  *
3089  * @osdev: qdf device handle
3090  * @tso_seg: TSO segment element to be unmapped
3091  * @is_last_seg: whether this is last tso seg or not
3092  *
3093  * Return: none
3094  */
3095 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3096 			  struct qdf_tso_seg_elem_t *tso_seg,
3097 			  bool is_last_seg)
3098 {
3099 	uint32_t num_frags = 0;
3100 
3101 	if (tso_seg->seg.num_frags > 0)
3102 		num_frags = tso_seg->seg.num_frags - 1;
3103 
3104 	/*Num of frags in a tso seg cannot be less than 2 */
3105 	if (num_frags < 1) {
3106 		qdf_assert(0);
3107 		qdf_err("ERROR: num of frags in a tso segment is %d",
3108 			(num_frags + 1));
3109 		return;
3110 	}
3111 
3112 	while (num_frags) {
3113 		/*Do dma unmap the tso seg except the 0th frag */
3114 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3115 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3116 				num_frags);
3117 			qdf_assert(0);
3118 			return;
3119 		}
3120 		dma_unmap_single(osdev->dev,
3121 				 tso_seg->seg.tso_frags[num_frags].paddr,
3122 				 tso_seg->seg.tso_frags[num_frags].length,
3123 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3124 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3125 		num_frags--;
3126 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3127 	}
3128 
3129 	if (is_last_seg) {
3130 		/*Do dma unmap for the tso seg 0th frag */
3131 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3132 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3133 			qdf_assert(0);
3134 			return;
3135 		}
3136 		dma_unmap_single(osdev->dev,
3137 				 tso_seg->seg.tso_frags[0].paddr,
3138 				 tso_seg->seg.tso_frags[0].length,
3139 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3140 		tso_seg->seg.tso_frags[0].paddr = 0;
3141 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3142 	}
3143 }
3144 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3145 
3146 /**
3147  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3148  * into segments
3149  * @nbuf:   network buffer to be segmented
3150  * @tso_info:  This is the output. The information about the
3151  *      TSO segments will be populated within this.
3152  *
3153  * This function fragments a TCP jumbo packet into smaller
3154  * segments to be transmitted by the driver. It chains the TSO
3155  * segments created into a list.
3156  *
3157  * Return: 0 - success, 1 - failure
3158  */
3159 #ifndef BUILD_X86
3160 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3161 {
3162 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3163 	uint32_t remainder, num_segs = 0;
3164 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3165 	uint8_t frags_per_tso = 0;
3166 	uint32_t skb_frag_len = 0;
3167 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3168 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3169 	struct skb_frag_struct *frag = NULL;
3170 	int j = 0;
3171 	uint32_t temp_num_seg = 0;
3172 
3173 	/* length of the first chunk of data in the skb minus eit header*/
3174 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3175 
3176 	/* Calculate num of segs for skb's first chunk of data*/
3177 	remainder = skb_frag_len % tso_seg_size;
3178 	num_segs = skb_frag_len / tso_seg_size;
3179 	/**
3180 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3181 	 * In that case, one more tso seg is required to accommodate
3182 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3183 	 * then remaining data will be accomodated while doing the calculation
3184 	 * for nr_frags data. Hence, frags_per_tso++.
3185 	 */
3186 	if (remainder) {
3187 		if (!skb_nr_frags)
3188 			num_segs++;
3189 		else
3190 			frags_per_tso++;
3191 	}
3192 
3193 	while (skb_nr_frags) {
3194 		if (j >= skb_shinfo(skb)->nr_frags) {
3195 			qdf_info("TSO: nr_frags %d j %d",
3196 				 skb_shinfo(skb)->nr_frags, j);
3197 			qdf_assert(0);
3198 			return 0;
3199 		}
3200 		/**
3201 		 * Calculate the number of tso seg for nr_frags data:
3202 		 * Get the length of each frag in skb_frag_len, add to
3203 		 * remainder.Get the number of segments by dividing it to
3204 		 * tso_seg_size and calculate the new remainder.
3205 		 * Decrement the nr_frags value and keep
3206 		 * looping all the skb_fragments.
3207 		 */
3208 		frag = &skb_shinfo(skb)->frags[j];
3209 		skb_frag_len = skb_frag_size(frag);
3210 		temp_num_seg = num_segs;
3211 		remainder += skb_frag_len;
3212 		num_segs += remainder / tso_seg_size;
3213 		remainder = remainder % tso_seg_size;
3214 		skb_nr_frags--;
3215 		if (remainder) {
3216 			if (num_segs > temp_num_seg)
3217 				frags_per_tso = 0;
3218 			/**
3219 			 * increment the tso per frags whenever remainder is
3220 			 * positive. If frags_per_tso reaches the (max-1),
3221 			 * [First frags always have EIT header, therefore max-1]
3222 			 * increment the num_segs as no more data can be
3223 			 * accomodated in the curr tso seg. Reset the remainder
3224 			 * and frags per tso and keep looping.
3225 			 */
3226 			frags_per_tso++;
3227 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3228 				num_segs++;
3229 				frags_per_tso = 0;
3230 				remainder = 0;
3231 			}
3232 			/**
3233 			 * If this is the last skb frag and still remainder is
3234 			 * non-zero(frags_per_tso is not reached to the max-1)
3235 			 * then increment the num_segs to take care of the
3236 			 * remaining length.
3237 			 */
3238 			if (!skb_nr_frags && remainder) {
3239 				num_segs++;
3240 				frags_per_tso = 0;
3241 			}
3242 		} else {
3243 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3244 			frags_per_tso = 0;
3245 		}
3246 		j++;
3247 	}
3248 
3249 	return num_segs;
3250 }
3251 #else
3252 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3253 {
3254 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3255 	struct skb_frag_struct *frag = NULL;
3256 
3257 	/*
3258 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3259 	 * region which cannot be accessed by Target
3260 	 */
3261 	if (virt_to_phys(skb->data) < 0x50000040) {
3262 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3263 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3264 				virt_to_phys(skb->data));
3265 		goto fail;
3266 
3267 	}
3268 
3269 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3270 		frag = &skb_shinfo(skb)->frags[i];
3271 
3272 		if (!frag)
3273 			goto fail;
3274 
3275 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3276 			goto fail;
3277 	}
3278 
3279 
3280 	gso_size = skb_shinfo(skb)->gso_size;
3281 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3282 			+ tcp_hdrlen(skb));
3283 	while (tmp_len) {
3284 		num_segs++;
3285 		if (tmp_len > gso_size)
3286 			tmp_len -= gso_size;
3287 		else
3288 			break;
3289 	}
3290 
3291 	return num_segs;
3292 
3293 	/*
3294 	 * Do not free this frame, just do socket level accounting
3295 	 * so that this is not reused.
3296 	 */
3297 fail:
3298 	if (skb->sk)
3299 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3300 
3301 	return 0;
3302 }
3303 #endif
3304 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3305 
3306 #endif /* FEATURE_TSO */
3307 
3308 /**
3309  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3310  *
3311  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3312  *
3313  * Return: N/A
3314  */
3315 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3316 			  uint32_t *lo, uint32_t *hi)
3317 {
3318 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3319 		*lo = lower_32_bits(dmaaddr);
3320 		*hi = upper_32_bits(dmaaddr);
3321 	} else {
3322 		*lo = dmaaddr;
3323 		*hi = 0;
3324 	}
3325 }
3326 
3327 qdf_export_symbol(__qdf_dmaaddr_to_32s);
3328 
3329 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3330 {
3331 	qdf_nbuf_users_inc(&skb->users);
3332 	return skb;
3333 }
3334 qdf_export_symbol(__qdf_nbuf_inc_users);
3335 
3336 int __qdf_nbuf_get_users(struct sk_buff *skb)
3337 {
3338 	return qdf_nbuf_users_read(&skb->users);
3339 }
3340 qdf_export_symbol(__qdf_nbuf_get_users);
3341 
3342 /**
3343  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3344  * @skb: sk_buff handle
3345  *
3346  * Return: none
3347  */
3348 
3349 void __qdf_nbuf_ref(struct sk_buff *skb)
3350 {
3351 	skb_get(skb);
3352 }
3353 qdf_export_symbol(__qdf_nbuf_ref);
3354 
3355 /**
3356  * __qdf_nbuf_shared() - Check whether the buffer is shared
3357  *  @skb: sk_buff buffer
3358  *
3359  *  Return: true if more than one person has a reference to this buffer.
3360  */
3361 int __qdf_nbuf_shared(struct sk_buff *skb)
3362 {
3363 	return skb_shared(skb);
3364 }
3365 qdf_export_symbol(__qdf_nbuf_shared);
3366 
3367 /**
3368  * __qdf_nbuf_dmamap_create() - create a DMA map.
3369  * @osdev: qdf device handle
3370  * @dmap: dma map handle
3371  *
3372  * This can later be used to map networking buffers. They :
3373  * - need space in adf_drv's software descriptor
3374  * - are typically created during adf_drv_create
3375  * - need to be created before any API(qdf_nbuf_map) that uses them
3376  *
3377  * Return: QDF STATUS
3378  */
3379 QDF_STATUS
3380 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3381 {
3382 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3383 	/*
3384 	 * driver can tell its SG capablity, it must be handled.
3385 	 * Bounce buffers if they are there
3386 	 */
3387 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3388 	if (!(*dmap))
3389 		error = QDF_STATUS_E_NOMEM;
3390 
3391 	return error;
3392 }
3393 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3394 /**
3395  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3396  * @osdev: qdf device handle
3397  * @dmap: dma map handle
3398  *
3399  * Return: none
3400  */
3401 void
3402 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3403 {
3404 	kfree(dmap);
3405 }
3406 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3407 
3408 /**
3409  * __qdf_nbuf_map_nbytes_single() - map nbytes
3410  * @osdev: os device
3411  * @buf: buffer
3412  * @dir: direction
3413  * @nbytes: number of bytes
3414  *
3415  * Return: QDF_STATUS
3416  */
3417 #ifdef A_SIMOS_DEVHOST
3418 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3419 		qdf_device_t osdev, struct sk_buff *buf,
3420 		 qdf_dma_dir_t dir, int nbytes)
3421 {
3422 	qdf_dma_addr_t paddr;
3423 
3424 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3425 	return QDF_STATUS_SUCCESS;
3426 }
3427 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3428 #else
3429 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3430 		qdf_device_t osdev, struct sk_buff *buf,
3431 		 qdf_dma_dir_t dir, int nbytes)
3432 {
3433 	qdf_dma_addr_t paddr;
3434 
3435 	/* assume that the OS only provides a single fragment */
3436 	QDF_NBUF_CB_PADDR(buf) = paddr =
3437 		dma_map_single(osdev->dev, buf->data,
3438 			nbytes, __qdf_dma_dir_to_os(dir));
3439 	return dma_mapping_error(osdev->dev, paddr) ?
3440 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3441 }
3442 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3443 #endif
3444 /**
3445  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3446  * @osdev: os device
3447  * @buf: buffer
3448  * @dir: direction
3449  * @nbytes: number of bytes
3450  *
3451  * Return: none
3452  */
3453 #if defined(A_SIMOS_DEVHOST)
3454 void
3455 __qdf_nbuf_unmap_nbytes_single(
3456 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3457 {
3458 }
3459 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3460 
3461 #else
3462 void
3463 __qdf_nbuf_unmap_nbytes_single(
3464 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3465 {
3466 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3467 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3468 		return;
3469 	}
3470 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3471 			nbytes, __qdf_dma_dir_to_os(dir));
3472 }
3473 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3474 #endif
3475 /**
3476  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3477  * @osdev: os device
3478  * @skb: skb handle
3479  * @dir: dma direction
3480  * @nbytes: number of bytes to be mapped
3481  *
3482  * Return: QDF_STATUS
3483  */
3484 #ifdef QDF_OS_DEBUG
3485 QDF_STATUS
3486 __qdf_nbuf_map_nbytes(
3487 	qdf_device_t osdev,
3488 	struct sk_buff *skb,
3489 	qdf_dma_dir_t dir,
3490 	int nbytes)
3491 {
3492 	struct skb_shared_info  *sh = skb_shinfo(skb);
3493 
3494 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3495 
3496 	/*
3497 	 * Assume there's only a single fragment.
3498 	 * To support multiple fragments, it would be necessary to change
3499 	 * adf_nbuf_t to be a separate object that stores meta-info
3500 	 * (including the bus address for each fragment) and a pointer
3501 	 * to the underlying sk_buff.
3502 	 */
3503 	qdf_assert(sh->nr_frags == 0);
3504 
3505 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3506 }
3507 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3508 #else
3509 QDF_STATUS
3510 __qdf_nbuf_map_nbytes(
3511 	qdf_device_t osdev,
3512 	struct sk_buff *skb,
3513 	qdf_dma_dir_t dir,
3514 	int nbytes)
3515 {
3516 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3517 }
3518 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3519 #endif
3520 /**
3521  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3522  * @osdev: OS device
3523  * @skb: skb handle
3524  * @dir: direction
3525  * @nbytes: number of bytes
3526  *
3527  * Return: none
3528  */
3529 void
3530 __qdf_nbuf_unmap_nbytes(
3531 	qdf_device_t osdev,
3532 	struct sk_buff *skb,
3533 	qdf_dma_dir_t dir,
3534 	int nbytes)
3535 {
3536 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3537 
3538 	/*
3539 	 * Assume there's a single fragment.
3540 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3541 	 */
3542 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3543 }
3544 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3545 
3546 /**
3547  * __qdf_nbuf_dma_map_info() - return the dma map info
3548  * @bmap: dma map
3549  * @sg: dma map info
3550  *
3551  * Return: none
3552  */
3553 void
3554 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3555 {
3556 	qdf_assert(bmap->mapped);
3557 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3558 
3559 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3560 			sizeof(struct __qdf_segment));
3561 	sg->nsegs = bmap->nsegs;
3562 }
3563 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3564 /**
3565  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3566  *			specified by the index
3567  * @skb: sk buff
3568  * @sg: scatter/gather list of all the frags
3569  *
3570  * Return: none
3571  */
3572 #if defined(__QDF_SUPPORT_FRAG_MEM)
3573 void
3574 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3575 {
3576 	qdf_assert(skb != NULL);
3577 	sg->sg_segs[0].vaddr = skb->data;
3578 	sg->sg_segs[0].len   = skb->len;
3579 	sg->nsegs            = 1;
3580 
3581 	for (int i = 1; i <= sh->nr_frags; i++) {
3582 		skb_frag_t    *f        = &sh->frags[i - 1];
3583 
3584 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3585 			f->page_offset);
3586 		sg->sg_segs[i].len      = f->size;
3587 
3588 		qdf_assert(i < QDF_MAX_SGLIST);
3589 	}
3590 	sg->nsegs += i;
3591 
3592 }
3593 qdf_export_symbol(__qdf_nbuf_frag_info);
3594 #else
3595 #ifdef QDF_OS_DEBUG
3596 void
3597 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3598 {
3599 
3600 	struct skb_shared_info  *sh = skb_shinfo(skb);
3601 
3602 	qdf_assert(skb != NULL);
3603 	sg->sg_segs[0].vaddr = skb->data;
3604 	sg->sg_segs[0].len   = skb->len;
3605 	sg->nsegs            = 1;
3606 
3607 	qdf_assert(sh->nr_frags == 0);
3608 }
3609 qdf_export_symbol(__qdf_nbuf_frag_info);
3610 #else
3611 void
3612 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3613 {
3614 	sg->sg_segs[0].vaddr = skb->data;
3615 	sg->sg_segs[0].len   = skb->len;
3616 	sg->nsegs            = 1;
3617 }
3618 qdf_export_symbol(__qdf_nbuf_frag_info);
3619 #endif
3620 #endif
3621 /**
3622  * __qdf_nbuf_get_frag_size() - get frag size
3623  * @nbuf: sk buffer
3624  * @cur_frag: current frag
3625  *
3626  * Return: frag size
3627  */
3628 uint32_t
3629 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3630 {
3631 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3632 	const skb_frag_t *frag = sh->frags + cur_frag;
3633 
3634 	return skb_frag_size(frag);
3635 }
3636 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3637 
3638 /**
3639  * __qdf_nbuf_frag_map() - dma map frag
3640  * @osdev: os device
3641  * @nbuf: sk buff
3642  * @offset: offset
3643  * @dir: direction
3644  * @cur_frag: current fragment
3645  *
3646  * Return: QDF status
3647  */
3648 #ifdef A_SIMOS_DEVHOST
3649 QDF_STATUS __qdf_nbuf_frag_map(
3650 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3651 	int offset, qdf_dma_dir_t dir, int cur_frag)
3652 {
3653 	int32_t paddr, frag_len;
3654 
3655 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3656 	return QDF_STATUS_SUCCESS;
3657 }
3658 qdf_export_symbol(__qdf_nbuf_frag_map);
3659 #else
3660 QDF_STATUS __qdf_nbuf_frag_map(
3661 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3662 	int offset, qdf_dma_dir_t dir, int cur_frag)
3663 {
3664 	dma_addr_t paddr, frag_len;
3665 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3666 	const skb_frag_t *frag = sh->frags + cur_frag;
3667 
3668 	frag_len = skb_frag_size(frag);
3669 
3670 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3671 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3672 					__qdf_dma_dir_to_os(dir));
3673 	return dma_mapping_error(osdev->dev, paddr) ?
3674 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3675 }
3676 qdf_export_symbol(__qdf_nbuf_frag_map);
3677 #endif
3678 /**
3679  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3680  * @dmap: dma map
3681  * @cb: callback
3682  * @arg: argument
3683  *
3684  * Return: none
3685  */
3686 void
3687 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3688 {
3689 	return;
3690 }
3691 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3692 
3693 
3694 /**
3695  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3696  * @osdev: os device
3697  * @buf: sk buff
3698  * @dir: direction
3699  *
3700  * Return: none
3701  */
3702 #if defined(A_SIMOS_DEVHOST)
3703 static void __qdf_nbuf_sync_single_for_cpu(
3704 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3705 {
3706 	return;
3707 }
3708 #else
3709 static void __qdf_nbuf_sync_single_for_cpu(
3710 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3711 {
3712 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3713 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3714 		return;
3715 	}
3716 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3717 		skb_end_offset(buf) - skb_headroom(buf),
3718 		__qdf_dma_dir_to_os(dir));
3719 }
3720 #endif
3721 /**
3722  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3723  * @osdev: os device
3724  * @skb: sk buff
3725  * @dir: direction
3726  *
3727  * Return: none
3728  */
3729 void
3730 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3731 	struct sk_buff *skb, qdf_dma_dir_t dir)
3732 {
3733 	qdf_assert(
3734 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3735 
3736 	/*
3737 	 * Assume there's a single fragment.
3738 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3739 	 */
3740 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3741 }
3742 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3743 
3744 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3745 /**
3746  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3747  * @rx_status: Pointer to rx_status.
3748  * @rtap_buf: Buf to which VHT info has to be updated.
3749  * @rtap_len: Current length of radiotap buffer
3750  *
3751  * Return: Length of radiotap after VHT flags updated.
3752  */
3753 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3754 					struct mon_rx_status *rx_status,
3755 					int8_t *rtap_buf,
3756 					uint32_t rtap_len)
3757 {
3758 	uint16_t vht_flags = 0;
3759 
3760 	rtap_len = qdf_align(rtap_len, 2);
3761 
3762 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3763 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3764 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3765 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3766 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3767 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3768 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3769 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3770 	rtap_len += 2;
3771 
3772 	rtap_buf[rtap_len] |=
3773 		(rx_status->is_stbc ?
3774 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3775 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3776 		(rx_status->ldpc ?
3777 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3778 		(rx_status->beamformed ?
3779 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3780 	rtap_len += 1;
3781 	switch (rx_status->vht_flag_values2) {
3782 	case IEEE80211_RADIOTAP_VHT_BW_20:
3783 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3784 		break;
3785 	case IEEE80211_RADIOTAP_VHT_BW_40:
3786 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3787 		break;
3788 	case IEEE80211_RADIOTAP_VHT_BW_80:
3789 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3790 		break;
3791 	case IEEE80211_RADIOTAP_VHT_BW_160:
3792 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3793 		break;
3794 	}
3795 	rtap_len += 1;
3796 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3797 	rtap_len += 1;
3798 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3799 	rtap_len += 1;
3800 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3801 	rtap_len += 1;
3802 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3803 	rtap_len += 1;
3804 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3805 	rtap_len += 1;
3806 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3807 	rtap_len += 1;
3808 	put_unaligned_le16(rx_status->vht_flag_values6,
3809 			   &rtap_buf[rtap_len]);
3810 	rtap_len += 2;
3811 
3812 	return rtap_len;
3813 }
3814 
3815 /**
3816  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3817  * @rx_status: Pointer to rx_status.
3818  * @rtap_buf: buffer to which radiotap has to be updated
3819  * @rtap_len: radiotap length
3820  *
3821  * API update high-efficiency (11ax) fields in the radiotap header
3822  *
3823  * Return: length of rtap_len updated.
3824  */
3825 static unsigned int
3826 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3827 				     int8_t *rtap_buf, uint32_t rtap_len)
3828 {
3829 	/*
3830 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3831 	 * Enable all "known" HE radiotap flags for now
3832 	 */
3833 	rtap_len = qdf_align(rtap_len, 2);
3834 
3835 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3836 	rtap_len += 2;
3837 
3838 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3839 	rtap_len += 2;
3840 
3841 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3842 	rtap_len += 2;
3843 
3844 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3845 	rtap_len += 2;
3846 
3847 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3848 	rtap_len += 2;
3849 
3850 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3851 	rtap_len += 2;
3852 	qdf_debug("he data %x %x %x %x %x %x",
3853 		  rx_status->he_data1,
3854 		  rx_status->he_data2, rx_status->he_data3,
3855 		  rx_status->he_data4, rx_status->he_data5,
3856 		  rx_status->he_data6);
3857 	return rtap_len;
3858 }
3859 
3860 
3861 /**
3862  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3863  * @rx_status: Pointer to rx_status.
3864  * @rtap_buf: buffer to which radiotap has to be updated
3865  * @rtap_len: radiotap length
3866  *
3867  * API update HE-MU fields in the radiotap header
3868  *
3869  * Return: length of rtap_len updated.
3870  */
3871 static unsigned int
3872 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3873 				     int8_t *rtap_buf, uint32_t rtap_len)
3874 {
3875 	rtap_len = qdf_align(rtap_len, 2);
3876 
3877 	/*
3878 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3879 	 * Enable all "known" he-mu radiotap flags for now
3880 	 */
3881 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3882 	rtap_len += 2;
3883 
3884 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3885 	rtap_len += 2;
3886 
3887 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3888 	rtap_len += 1;
3889 
3890 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3891 	rtap_len += 1;
3892 
3893 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3894 	rtap_len += 1;
3895 
3896 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3897 	rtap_len += 1;
3898 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3899 		  rx_status->he_flags1,
3900 		  rx_status->he_flags2, rx_status->he_RU[0],
3901 		  rx_status->he_RU[1], rx_status->he_RU[2],
3902 		  rx_status->he_RU[3]);
3903 
3904 	return rtap_len;
3905 }
3906 
3907 /**
3908  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3909  * @rx_status: Pointer to rx_status.
3910  * @rtap_buf: buffer to which radiotap has to be updated
3911  * @rtap_len: radiotap length
3912  *
3913  * API update he-mu-other fields in the radiotap header
3914  *
3915  * Return: length of rtap_len updated.
3916  */
3917 static unsigned int
3918 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3919 				     int8_t *rtap_buf, uint32_t rtap_len)
3920 {
3921 	rtap_len = qdf_align(rtap_len, 2);
3922 
3923 	/*
3924 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3925 	 * Enable all "known" he-mu-other radiotap flags for now
3926 	 */
3927 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3928 	rtap_len += 2;
3929 
3930 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3931 	rtap_len += 2;
3932 
3933 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3934 	rtap_len += 1;
3935 
3936 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3937 	rtap_len += 1;
3938 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3939 		  rx_status->he_per_user_1,
3940 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3941 		  rx_status->he_per_user_known);
3942 	return rtap_len;
3943 }
3944 
3945 
3946 /**
3947  * This is the length for radiotap, combined length
3948  * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
3949  * cannot be more than available headroom_sz.
3950  * increase this when we add more radiotap elements.
3951  * Number after '+' indicates maximum possible increase due to alignment
3952  */
3953 
3954 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3955 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3956 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3957 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3958 #define RADIOTAP_FIXED_HEADER_LEN 17
3959 #define RADIOTAP_HT_FLAGS_LEN 3
3960 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3961 #define RADIOTAP_VENDOR_NS_LEN \
3962 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3963 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3964 				RADIOTAP_FIXED_HEADER_LEN + \
3965 				RADIOTAP_HT_FLAGS_LEN + \
3966 				RADIOTAP_VHT_FLAGS_LEN + \
3967 				RADIOTAP_AMPDU_STATUS_LEN + \
3968 				RADIOTAP_HE_FLAGS_LEN + \
3969 				RADIOTAP_HE_MU_FLAGS_LEN + \
3970 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
3971 				RADIOTAP_VENDOR_NS_LEN)
3972 
3973 #define IEEE80211_RADIOTAP_HE 23
3974 #define IEEE80211_RADIOTAP_HE_MU	24
3975 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3976 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
3977 
3978 /**
3979  * radiotap_num_to_freq() - Get frequency from chan number
3980  * @chan_num - Input channel number
3981  *
3982  * Return - Channel frequency in Mhz
3983  */
3984 static uint16_t radiotap_num_to_freq (uint16_t chan_num)
3985 {
3986 	if (chan_num == CHANNEL_NUM_14)
3987 		return CHANNEL_FREQ_2484;
3988 	if (chan_num < CHANNEL_NUM_14)
3989 		return CHANNEL_FREQ_2407 +
3990 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3991 
3992 	if (chan_num < CHANNEL_NUM_27)
3993 		return CHANNEL_FREQ_2512 +
3994 			((chan_num - CHANNEL_NUM_15) *
3995 			 FREQ_MULTIPLIER_CONST_20MHZ);
3996 
3997 	if (chan_num > CHANNEL_NUM_182 &&
3998 			chan_num < CHANNEL_NUM_197)
3999 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4000 			CHANNEL_FREQ_4000);
4001 
4002 	return CHANNEL_FREQ_5000 +
4003 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4004 }
4005 
4006 /**
4007  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4008  * @rx_status: Pointer to rx_status.
4009  * @rtap_buf: Buf to which AMPDU info has to be updated.
4010  * @rtap_len: Current length of radiotap buffer
4011  *
4012  * Return: Length of radiotap after AMPDU flags updated.
4013  */
4014 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4015 					struct mon_rx_status *rx_status,
4016 					uint8_t *rtap_buf,
4017 					uint32_t rtap_len)
4018 {
4019 	/*
4020 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4021 	 * First 32 bits of AMPDU represents the reference number
4022 	 */
4023 
4024 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4025 	uint16_t ampdu_flags = 0;
4026 	uint16_t ampdu_reserved_flags = 0;
4027 
4028 	rtap_len = qdf_align(rtap_len, 4);
4029 
4030 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4031 	rtap_len += 4;
4032 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4033 	rtap_len += 2;
4034 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4035 	rtap_len += 2;
4036 
4037 	return rtap_len;
4038 }
4039 
4040 /**
4041  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4042  * @rx_status: Pointer to rx_status.
4043  * @nbuf:      nbuf pointer to which radiotap has to be updated
4044  * @headroom_sz: Available headroom size.
4045  *
4046  * Return: length of rtap_len updated.
4047  */
4048 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4049 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4050 {
4051 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4052 	struct ieee80211_radiotap_header *rthdr =
4053 		(struct ieee80211_radiotap_header *)rtap_buf;
4054 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4055 	uint32_t rtap_len = rtap_hdr_len;
4056 	uint8_t length = rtap_len;
4057 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4058 
4059 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4060 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4061 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4062 	rtap_len += 8;
4063 
4064 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4065 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4066 
4067 	if (rx_status->rs_fcs_err)
4068 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4069 
4070 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4071 	rtap_len += 1;
4072 
4073 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4074 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4075 	    !rx_status->he_flags) {
4076 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4077 		rtap_buf[rtap_len] = rx_status->rate;
4078 	} else
4079 		rtap_buf[rtap_len] = 0;
4080 	rtap_len += 1;
4081 
4082 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4083 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4084 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4085 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4086 	rtap_len += 2;
4087 	/* Channel flags. */
4088 	if (rx_status->chan_num > CHANNEL_NUM_35)
4089 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4090 	else
4091 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4092 	if (rx_status->cck_flag)
4093 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4094 	if (rx_status->ofdm_flag)
4095 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4096 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4097 	rtap_len += 2;
4098 
4099 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4100 	 *					(dBm)
4101 	 */
4102 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4103 	/*
4104 	 * rssi_comb is int dB, need to convert it to dBm.
4105 	 * normalize value to noise floor of -96 dBm
4106 	 */
4107 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4108 	rtap_len += 1;
4109 
4110 	/* RX signal noise floor */
4111 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4112 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4113 	rtap_len += 1;
4114 
4115 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4116 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4117 	rtap_buf[rtap_len] = rx_status->nr_ant;
4118 	rtap_len += 1;
4119 
4120 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4121 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4122 		return 0;
4123 	}
4124 
4125 	if (rx_status->ht_flags) {
4126 		length = rtap_len;
4127 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
4128 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4129 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4130 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4131 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4132 		rtap_len += 1;
4133 
4134 		if (rx_status->sgi)
4135 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4136 		if (rx_status->bw)
4137 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4138 		else
4139 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4140 		rtap_len += 1;
4141 
4142 		rtap_buf[rtap_len] = rx_status->mcs;
4143 		rtap_len += 1;
4144 
4145 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4146 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4147 			return 0;
4148 		}
4149 	}
4150 
4151 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4152 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4153 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4154 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4155 								rtap_buf,
4156 								rtap_len);
4157 	}
4158 
4159 	if (rx_status->vht_flags) {
4160 		length = rtap_len;
4161 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4162 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4163 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4164 								rtap_buf,
4165 								rtap_len);
4166 
4167 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4168 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4169 			return 0;
4170 		}
4171 	}
4172 
4173 	if (rx_status->he_flags) {
4174 		length = rtap_len;
4175 		/* IEEE80211_RADIOTAP_HE */
4176 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4177 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4178 								rtap_buf,
4179 								rtap_len);
4180 
4181 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4182 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4183 			return 0;
4184 		}
4185 	}
4186 
4187 	if (rx_status->he_mu_flags) {
4188 		length = rtap_len;
4189 		/* IEEE80211_RADIOTAP_HE-MU */
4190 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4191 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4192 								rtap_buf,
4193 								rtap_len);
4194 
4195 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4196 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4197 			return 0;
4198 		}
4199 	}
4200 
4201 	if (rx_status->he_mu_other_flags) {
4202 		length = rtap_len;
4203 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4204 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4205 		rtap_len =
4206 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4207 								rtap_buf,
4208 								rtap_len);
4209 
4210 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4211 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4212 			return 0;
4213 		}
4214 	}
4215 
4216 	rtap_len = qdf_align(rtap_len, 2);
4217 	/*
4218 	 * Radiotap Vendor Namespace
4219 	 */
4220 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4221 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4222 					(rtap_buf + rtap_len);
4223 	/*
4224 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4225 	 */
4226 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4227 	/*
4228 	 * Name space selector = 0
4229 	 * We only will have one namespace for now
4230 	 */
4231 	radiotap_vendor_ns_ath->hdr.selector = 0;
4232 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4233 					sizeof(*radiotap_vendor_ns_ath) -
4234 					sizeof(radiotap_vendor_ns_ath->hdr));
4235 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4236 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4237 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4238 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4239 				cpu_to_le32(rx_status->ppdu_timestamp);
4240 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4241 
4242 	rthdr->it_len = cpu_to_le16(rtap_len);
4243 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4244 
4245 	if (headroom_sz < rtap_len) {
4246 		qdf_err("ERROR: not enough space to update radiotap");
4247 		return 0;
4248 	}
4249 	qdf_nbuf_push_head(nbuf, rtap_len);
4250 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4251 	return rtap_len;
4252 }
4253 #else
4254 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4255 					struct mon_rx_status *rx_status,
4256 					int8_t *rtap_buf,
4257 					uint32_t rtap_len)
4258 {
4259 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4260 	return 0;
4261 }
4262 
4263 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4264 				      int8_t *rtap_buf, uint32_t rtap_len)
4265 {
4266 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4267 	return 0;
4268 }
4269 
4270 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4271 					struct mon_rx_status *rx_status,
4272 					uint8_t *rtap_buf,
4273 					uint32_t rtap_len)
4274 {
4275 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4276 	return 0;
4277 }
4278 
4279 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4280 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4281 {
4282 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4283 	return 0;
4284 }
4285 #endif
4286 qdf_export_symbol(qdf_nbuf_update_radiotap);
4287 
4288 /**
4289  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4290  * @cb_func_ptr: function pointer to the nbuf free callback
4291  *
4292  * This function registers a callback function for nbuf free.
4293  *
4294  * Return: none
4295  */
4296 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4297 {
4298 	nbuf_free_cb = cb_func_ptr;
4299 }
4300 
4301 /**
4302  * qdf_nbuf_classify_pkt() - classify packet
4303  * @skb - sk buff
4304  *
4305  * Return: none
4306  */
4307 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4308 {
4309 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4310 
4311 	/* check destination mac address is broadcast/multicast */
4312 	if (is_broadcast_ether_addr((uint8_t *)eh))
4313 		QDF_NBUF_CB_SET_BCAST(skb);
4314 	else if (is_multicast_ether_addr((uint8_t *)eh))
4315 		QDF_NBUF_CB_SET_MCAST(skb);
4316 
4317 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4318 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4319 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4320 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4321 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4322 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4323 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4324 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4325 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4326 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4327 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4328 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4329 }
4330 qdf_export_symbol(qdf_nbuf_classify_pkt);
4331 
4332 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4333 {
4334 	qdf_nbuf_users_set(&nbuf->users, 1);
4335 	nbuf->data = nbuf->head + NET_SKB_PAD;
4336 	skb_reset_tail_pointer(nbuf);
4337 }
4338 qdf_export_symbol(__qdf_nbuf_init);
4339 
4340 #ifdef WLAN_FEATURE_FASTPATH
4341 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4342 {
4343 	qdf_nbuf_users_set(&nbuf->users, 1);
4344 	nbuf->data = nbuf->head + NET_SKB_PAD;
4345 	skb_reset_tail_pointer(nbuf);
4346 }
4347 qdf_export_symbol(qdf_nbuf_init_fast);
4348 #endif /* WLAN_FEATURE_FASTPATH */
4349 
4350 
4351 #ifdef QDF_NBUF_GLOBAL_COUNT
4352 /**
4353  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
4354  *
4355  * Return void
4356  */
4357 void __qdf_nbuf_mod_init(void)
4358 {
4359 	qdf_atomic_init(&nbuf_count);
4360 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4361 }
4362 
4363 /**
4364  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
4365  *
4366  * Return void
4367  */
4368 void __qdf_nbuf_mod_exit(void)
4369 {
4370 }
4371 #endif
4372