/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf.c
 * QCA driver framework (QDF) network buffer management APIs
 */

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <qdf_atomic.h>
#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "qdf_flex_mem.h"
#include <qdf_mem.h>
#include <qdf_status.h>
#include <qdf_lock.h>
#include <qdf_trace.h>
#include <qdf_debugfs.h>
#include <net/ieee80211_radiotap.h>
#include <qdf_module.h>
#include <pld_common.h>
#include "qdf_str.h"

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)

#define qdf_nbuf_users_inc atomic_inc
#define qdf_nbuf_users_dec atomic_dec
#define qdf_nbuf_users_set atomic_set
#define qdf_nbuf_users_read atomic_read
#else
#define qdf_nbuf_users_inc refcount_inc
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */
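
/*
 * Illustrative sketch (not part of the driver): the wrappers above let
 * common code manipulate skb->users without caring that it changed from
 * atomic_t to refcount_t in kernel 4.13, e.g.:
 *
 *	qdf_nbuf_users_inc(&skb->users);
 *	...
 *	qdf_nbuf_users_dec(&skb->users);
 */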

#define IEEE80211_RADIOTAP_VHT_BW_20	0
#define IEEE80211_RADIOTAP_VHT_BW_40	1
#define IEEE80211_RADIOTAP_VHT_BW_80	2
#define IEEE80211_RADIOTAP_VHT_BW_160	3

#define RADIOTAP_VHT_BW_20	0
#define RADIOTAP_VHT_BW_40	1
#define RADIOTAP_VHT_BW_80	4
#define RADIOTAP_VHT_BW_160	11

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040

#ifdef CONFIG_MCL
#include <qdf_mc_timer.h>

struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME      "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

/**
 * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
			uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		break;
	}
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_update);

/**
 * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both
 * data and mgmt packets
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

/**
 * qdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state: layer at which the packet currently is
 *
 * This function updates the packet state to the layer at which the packet
 * currently is.
 *
 * Return: none
 */
void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt, Data Packets are tracked. WMI messages
	 * such as scan commands are not tracked
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}
	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
					current_state);
}
qdf_export_symbol(qdf_nbuf_set_state);
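
/*
 * Illustrative sketch (not part of the driver): a TX layer records a
 * packet handoff so the counters above can localize descriptor leaks.
 * The HTC state here is just one example value.
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);
 */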

#ifdef CONFIG_MCL
/**
 * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler - Replenish expire handler
 *
 * This function triggers when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

/**
 * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

/**
 * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}
#else

static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}
#endif

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT

/**
 * __qdf_nbuf_count_get() - get nbuf global count
 *
 * Return: nbuf global count
 */
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_get);

/**
 * __qdf_nbuf_count_inc() - increment nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	qdf_atomic_inc(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_inc);

/**
 * __qdf_nbuf_count_dec() - decrement nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_atomic_dec(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_dec);
#endif
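
/*
 * Illustrative sketch (not part of the driver): with QDF_NBUF_GLOBAL_COUNT
 * enabled, the outstanding-nbuf total can be sampled around a suspected
 * leak, e.g. across a driver unload:
 *
 *	int before = __qdf_nbuf_count_get();
 *	...
 *	if (__qdf_nbuf_count_get() != before)
 *		qdf_err("nbuf leak suspected");
 */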

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 *
 * This allocates an nbuf, aligns it if needed, and reserves some headroom.
 * Since the reserve is done after alignment, an unaligned reserve value will
 * result in an unaligned data pointer.
 *
 * Return: nbuf or %NULL if no memory
 */
#if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
			 int align, int prio)
{
	struct sk_buff *skb;
	unsigned long offset;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		pr_info("ERROR:NBUF alloc failed\n");
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
	 * Though we are trying to reserve low memory upfront to prevent this,
	 * we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_err("Failed to allocate skb above the low-memory threshold");
			return NULL;
		} else {
			/* Not freeing the skb, to make sure the same low
			 * memory will not be allocated again
			 */
			goto realloc;
		}
	}
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
			 int align, int prio)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3
		 * pages. __netdev_alloc_skb has a 4k page fallback option
		 * just in case high order page allocation fails, so we
		 * don't need to try that hard. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		pr_err_ratelimited("ERROR:NBUF alloc failed, size = %zu\n",
				   size);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	} else {
		__qdf_nbuf_stop_replenish_timer();
	}

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#endif
qdf_export_symbol(__qdf_nbuf_alloc);
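
/*
 * Illustrative sketch (not part of the driver): a typical allocation with
 * cache-line alignment and some headroom; the 2048/64/4 values here are
 * hypothetical.
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(osdev, 2048, 64, 4, 0);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_nbuf_free(nbuf);
 */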

/**
 * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
 * @skb: Pointer to network buffer
 *
 * Return: none
 */

#ifdef CONFIG_MCL
void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}
#else
void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	dev_kfree_skb_any(skb);
}
#endif

qdf_export_symbol(__qdf_nbuf_free);

#ifdef NBUF_MEMORY_DEBUG
enum qdf_nbuf_event_type {
	QDF_NBUF_ALLOC,
	QDF_NBUF_FREE,
	QDF_NBUF_MAP,
	QDF_NBUF_UNMAP,
};

struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
};

#define QDF_NBUF_HISTORY_SIZE 4096
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}
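
/*
 * Worked example (illustrative): with size = 4096, successive calls return
 * 1, 2, ..., 4095; the call that increments the shared index to 4096
 * subtracts 4096 (rewinding it toward 0) and returns 4096 % 4096 = 0, so
 * returned slots always stay within [0, size) without taking a lock.
 */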

static void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	event->nbuf = nbuf;
	qdf_str_lcopy(event->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
}

struct qdf_nbuf_map_metadata {
	struct hlist_node node;
	qdf_nbuf_t nbuf;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
};

DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
			 sizeof(struct qdf_nbuf_map_metadata), 0);
#define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
static qdf_spinlock_t qdf_nbuf_map_lock;

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_flex_mem_init(&qdf_nbuf_map_pool);
	hash_init(qdf_nbuf_map_ht);
	qdf_spinlock_create(&qdf_nbuf_map_lock);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	struct qdf_nbuf_map_metadata *meta;
	int bucket;
	uint32_t count = 0;
	bool is_empty;

	qdf_flex_mem_release(&qdf_nbuf_map_pool);

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	is_empty = hash_empty(qdf_nbuf_map_ht);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	if (is_empty)
		return;

	qdf_err("Nbuf map without unmap events detected!");
	qdf_err("------------------------------------------------------------");

	/* Hold the lock for the entire iteration for safe list/meta access. We
	 * are explicitly preferring the chance to watchdog on the print, over
	 * the possibility of invalid list/memory access. Since we are going to
	 * panic anyway, the worst case is loading up the crash dump to find out
	 * what was in the hash table.
	 */
	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
		count++;
		qdf_err("%pK @ %s:%u",
			meta->nbuf, meta->file, meta->line);
	}
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	panic("%u fatal nbuf map without unmap events detected!", count);
}

static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_nbuf_map_check_for_leaks();
	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
}

static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
{
	struct qdf_nbuf_map_metadata *meta;

	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
		if (meta->nbuf == nbuf)
			return meta;
	}

	return NULL;
}

static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
{
	struct qdf_nbuf_map_metadata *meta;

	QDF_BUG(nbuf);
	if (!nbuf) {
		qdf_err("Cannot map null nbuf");
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	meta = qdf_nbuf_meta_get(nbuf);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
	if (meta)
		QDF_DEBUG_PANIC(
			"Double nbuf map detected @ %s:%u; last map from %s:%u",
			kbasename(file), line, meta->file, meta->line);

	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
	if (!meta) {
		qdf_err("Failed to allocate nbuf map tracking metadata");
		return QDF_STATUS_E_NOMEM;
	}

	meta->nbuf = nbuf;
	qdf_str_lcopy(meta->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
	meta->line = line;

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);

	return QDF_STATUS_SUCCESS;
}

static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
{
	struct qdf_nbuf_map_metadata *meta;

	QDF_BUG(nbuf);
	if (!nbuf) {
		qdf_err("Cannot unmap null nbuf");
		return;
	}

	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
	meta = qdf_nbuf_meta_get(nbuf);

	if (!meta)
		QDF_DEBUG_PANIC(
		      "Double nbuf unmap or unmap without map detected @ %s:%u",
		      kbasename(file), line);

	hash_del(&meta->node);
	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);

	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);

	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
}

QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
			      qdf_nbuf_t buf,
			      qdf_dma_dir_t dir,
			      const char *file,
			      uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, file, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, file, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_debug);

void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *file,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, file, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);
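
/*
 * Illustrative sketch (not part of the driver): with NBUF_MEMORY_DEBUG
 * enabled, callers keep using the normal qdf_nbuf_map()/qdf_nbuf_unmap()
 * wrappers; those macros route here with __FILE__/__LINE__ so every map
 * is paired against its unmap:
 *
 *	if (QDF_IS_STATUS_ERROR(qdf_nbuf_map(osdev, nbuf, QDF_DMA_TO_DEVICE)))
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	qdf_nbuf_unmap(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */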

QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     const char *file,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, file, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_single(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, file, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_single_debug);

void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 const char *file,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, file, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_single_debug);

QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     int nbytes,
				     const char *file,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, file, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, file, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_debug);

void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 int nbytes,
				 const char *file,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, file, line);
	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);

QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
					    qdf_nbuf_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes,
					    const char *file,
					    uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, file, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, file, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);

void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
					qdf_nbuf_t buf,
					qdf_dma_dir_t dir,
					int nbytes,
					const char *file,
					uint32_t line)
{
	qdf_nbuf_untrack_map(buf, file, line);
	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
#endif /* NBUF_MEMORY_DEBUG */

/**
 * __qdf_nbuf_map() - map a buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#ifdef QDF_OS_DEBUG
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	struct skb_shared_info *sh = skb_shinfo(skb);

	qdf_assert((dir == QDF_DMA_TO_DEVICE)
			|| (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's only a single fragment.
	 * To support multiple fragments, it would be necessary to change
	 * qdf_nbuf_t to be a separate object that stores meta-info
	 * (including the bus address for each fragment) and a pointer
	 * to the underlying sk_buff.
	 */
	qdf_assert(sh->nr_frags == 0);

	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);

#else
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);
#endif

/**
 * __qdf_nbuf_unmap() - to unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: dma direction
 *
 * Return: none
 */
void
__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
			qdf_dma_dir_t dir)
{
	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_unmap);

/**
 * __qdf_nbuf_map_single() - map a single buffer to local bus address space
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#else
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
				skb_end_pointer(buf) - buf->data,
				__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr)
		? QDF_STATUS_E_FAILURE
		: QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#endif

/**
 * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
				qdf_dma_dir_t dir)
{
}
#else
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
					qdf_dma_dir_t dir)
{
	if (QDF_NBUF_CB_PADDR(buf))
		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
			skb_end_pointer(buf) - buf->data,
			__qdf_dma_dir_to_os(dir));
}
#endif
qdf_export_symbol(__qdf_nbuf_unmap_single);
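
/*
 * Illustrative sketch (not part of the driver): mapping a single-fragment
 * nbuf for device TX, then unmapping it after completion. The cached bus
 * address is retrieved via QDF_NBUF_CB_PADDR(); hw_post_tx() is a
 * hypothetical hardware hook.
 *
 *	if (__qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	hw_post_tx(QDF_NBUF_CB_PADDR(nbuf));
 *	...
 *	__qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */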

/**
 * __qdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
{
	switch (cksum->l4_result) {
	case QDF_NBUF_RX_CKSUM_NONE:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = cksum->val;
		break;
	default:
		pr_err("Unknown checksum type\n");
		qdf_assert(0);
		return QDF_STATUS_E_NOSUPPORT;
	}
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
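
/*
 * Illustrative sketch (not part of the driver): an RX path reporting that
 * hardware already verified the L4 checksum, so the stack can skip it. The
 * field names follow the usage in __qdf_nbuf_set_rx_cksum() above.
 *
 *	qdf_nbuf_rx_cksum_t cksum = {0};
 *
 *	cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */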

/**
 * __qdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
{
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		return QDF_NBUF_TX_CKSUM_NONE;
	case CHECKSUM_PARTIAL:
		return QDF_NBUF_TX_CKSUM_TCP_UDP;
	case CHECKSUM_COMPLETE:
		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
	default:
		return QDF_NBUF_TX_CKSUM_NONE;
	}
}
qdf_export_symbol(__qdf_nbuf_get_tx_cksum);

/**
 * __qdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
{
	return skb->priority;
}
qdf_export_symbol(__qdf_nbuf_get_tid);

/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to set
 *
 * Return: none
 */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
	skb->priority = tid;
}
qdf_export_symbol(__qdf_nbuf_set_tid);

/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
}
qdf_export_symbol(__qdf_nbuf_get_exemption_type);

/**
 * __qdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
{
	qdf_trace_update_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_trace_cb);

/**
 * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
 *              of DHCP packet.
 * @data: Pointer to DHCP packet data buffer
 *
 * This func. returns the subtype of DHCP packet.
 *
 * Return: subtype of the DHCP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
					QDF_DHCP_OPTION53_LENGTH)) {

		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
		case QDF_DHCP_DISCOVER:
			subtype = QDF_PROTO_DHCP_DISCOVER;
			break;
		case QDF_DHCP_REQUEST:
			subtype = QDF_PROTO_DHCP_REQUEST;
			break;
		case QDF_DHCP_OFFER:
			subtype = QDF_PROTO_DHCP_OFFER;
			break;
		case QDF_DHCP_ACK:
			subtype = QDF_PROTO_DHCP_ACK;
			break;
		case QDF_DHCP_NAK:
			subtype = QDF_PROTO_DHCP_NACK;
			break;
		case QDF_DHCP_RELEASE:
			subtype = QDF_PROTO_DHCP_RELEASE;
			break;
		case QDF_DHCP_INFORM:
			subtype = QDF_PROTO_DHCP_INFORM;
			break;
		case QDF_DHCP_DECLINE:
			subtype = QDF_PROTO_DHCP_DECLINE;
			break;
		default:
			break;
		}
	}

	return subtype;
}
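
/*
 * Illustrative sketch (not part of the driver): a logging path classifying
 * an outgoing frame, first checking the EtherType/ports and only then
 * parsing the subtype:
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(qdf_nbuf_data(nbuf))) {
 *		enum qdf_proto_subtype st =
 *			__qdf_nbuf_data_get_dhcp_subtype(qdf_nbuf_data(nbuf));
 *		...
 *	}
 */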

/**
 * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
 *            of EAPOL packet.
 * @data: Pointer to EAPOL packet data buffer
 *
 * This func. returns the subtype of EAPOL packet.
 *
 * Return: subtype of the EAPOL packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
{
	uint16_t eapol_key_info;
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	uint16_t mask;

	eapol_key_info = (uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_INFO_OFFSET));

	mask = eapol_key_info & EAPOL_MASK;
	switch (mask) {
	case EAPOL_M1_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M1;
		break;
	case EAPOL_M2_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M2;
		break;
	case EAPOL_M3_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M3;
		break;
	case EAPOL_M4_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M4;
		break;
	default:
		break;
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_arp_subtype() - get the subtype
 *            of ARP packet.
 * @data: Pointer to ARP packet data buffer
 *
 * This func. returns the subtype of ARP packet.
 *
 * Return: subtype of the ARP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_arp_subtype(uint8_t *data)
{
	uint16_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint16_t)(*(uint16_t *)
			(data + ARP_SUB_TYPE_OFFSET));

	switch (QDF_SWAP_U16(subtype)) {
	case ARP_REQUEST:
		proto_subtype = QDF_PROTO_ARP_REQ;
		break;
	case ARP_RESPONSE:
		proto_subtype = QDF_PROTO_ARP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
 *            of IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. returns the subtype of ICMP packet.
 *
 * Return: subtype of the ICMP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMP_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMP_REQUEST:
		proto_subtype = QDF_PROTO_ICMP_REQ;
		break;
	case ICMP_RESPONSE:
		proto_subtype = QDF_PROTO_ICMP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
 *            of IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. returns the subtype of ICMPV6 packet.
 *
 * Return: subtype of the ICMPV6 packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMPV6_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMPV6_REQUEST:
		proto_subtype = QDF_PROTO_ICMPV6_REQ;
		break;
	case ICMPV6_RESPONSE:
		proto_subtype = QDF_PROTO_ICMPV6_RES;
		break;
	case ICMPV6_RS:
		proto_subtype = QDF_PROTO_ICMPV6_RS;
		break;
	case ICMPV6_RA:
		proto_subtype = QDF_PROTO_ICMPV6_RA;
		break;
	case ICMPV6_NS:
		proto_subtype = QDF_PROTO_ICMPV6_NS;
		break;
	case ICMPV6_NA:
		proto_subtype = QDF_PROTO_ICMPV6_NA;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
 *            of IPV4 packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. returns the proto type of IPV4 packet.
 *
 * Return: proto type of IPV4 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
 *            of IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. returns the proto type of IPV6 packet.
 *
 * Return: proto type of IPV6 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
 * @data: Pointer to network data
 *
 * This api is for Tx packets.
 *
 * Return: true if packet is ipv4 packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
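
/*
 * Worked example (illustrative): the EtherType sits in network (big-endian)
 * byte order in the frame, so on a little-endian host reading the two wire
 * bytes 0x08 0x00 of an IPv4 frame as a uint16_t yields 0x0008. Comparing
 * against QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) handles this without
 * modifying the packet data:
 *
 *	uint16_t et = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
 *	// little-endian host: et == 0x0008 for wire EtherType 0x0800
 */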

/**
 * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
					 sizeof(uint16_t)));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
 * @data: Pointer to network data buffer
 *
 * This api checks the frame's EtherType.
 *
 * Return: true if packet is EAPOL packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);

/**
 * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
 * @skb: Pointer to network buffer
 *
 * This api checks the frame's EtherType.
 *
 * Return: true if packet is WAPI packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);

/**
 * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
 * @skb: Pointer to network buffer
 *
 * This api checks the frame's EtherType.
 *
 * Return: true if packet is tdls packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: true if packet is ARP packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);

/**
 * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: true if packet is ARP request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: true if packet is ARP response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_arp_src_ip() - get arp src IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: ARP packet source IP value.
 */
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: ARP packet target IP value.
 */
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_get_dns_domain_name() - get dns domain name
 * @data: Pointer to network data buffer
 * @len: length to copy
 *
 * This api returns the dns domain name.
 *
 * Return: dns domain name.
 */
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
{
	uint8_t *domain_name;

	domain_name = (uint8_t *)
			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
	return domain_name;
}

/**
 * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
 * @data: Pointer to network data buffer
 *
 * This api is for dns query packet.
 *
 * Return: true if packet is dns query packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
{
	uint16_t op_code;
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
	/* Standard DNS query always happens on Dest Port 53. */
	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
 * @data: Pointer to network data buffer
 *
 * This api is for dns query response.
 *
 * Return: true if packet is dns response packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
{
	uint16_t op_code;
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
	/* Standard DNS response always comes on Src Port 53. */
	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));

		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
 * @data: Pointer to network data buffer
 *
 * This api is for tcp syn packet.
 *
 * Return: true if packet is tcp syn packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
 * @data: Pointer to network data buffer
 *
 * This api is for tcp syn ack packet.
 *
 * Return: true if packet is tcp syn ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
 * @data: Pointer to network data buffer
 *
 * This api is for tcp ack packet.
 *
 * Return: true if packet is tcp ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
 * @data: Pointer to network data buffer
 *
 * This api is for tcp packet.
 *
 * Return: tcp source port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
{
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));

	return src_port;
}

/**
 * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
 * @data: Pointer to network data buffer
 *
 * This api is for tcp packet.
 *
 * Return: tcp destination port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
{
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));

	return tgt_port;
}

/**
 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 req packet.
 *
 * Return: true if packet is icmpv4 request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 res packet.
 *
 * Return: true if packet is icmpv4 response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: icmpv4 packet source IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
 * @data: Pointer to network data buffer
 *
 * This api is for ipv4 packet.
 *
 * Return: icmpv4 packet target IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 packet or not.
 *
 * Return: TRUE if it is an IPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);

/**
 * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This api is for ipv6 packet.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
					sizeof(uint16_t));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
 * @data: Pointer to IPV4 packet data buffer
 *
 * This func. checks whether it is an IPV4 multicast packet or not.
 *
 * Return: TRUE if it is an IPV4 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
		uint32_t *dst_addr =
		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);

		/*
		 * Check the top nibble of the IPV4 destination address;
		 * 0xE (1110b) marks the multicast range 224.0.0.0/4.
		 */
		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
 * @data: Pointer to IPV6 packet data buffer
 *
 * This func. checks whether it is an IPV6 multicast packet or not.
 *
 * Return: TRUE if it is an IPV6 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
		uint16_t *dst_addr;

		dst_addr = (uint16_t *)
			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);

		/*
		 * Check the first word of the destination address; if it
		 * is 0xFF00, this is an IPV6 mcast packet.
		 */
		if (*dst_addr ==
		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This func. checks whether it is an ICMP packet or not.
 *
 * Return: TRUE if it is an ICMP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
		uint8_t pkt_type;

		pkt_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
			return true;
		else
			return false;
	} else
		return false;
}

/**
 * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This func. checks whether it is an ICMPV6 packet or not.
 *
 * Return: TRUE if it is an ICMPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
		uint8_t pkt_type;

		pkt_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));

		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
			return true;
		else
			return false;
	} else
		return false;
}
1964 
1965 /**
1966  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1967  * @data: Pointer to IPV4 UDP packet data buffer
1968  *
1969  * This func. checks whether it is a IPV4 UDP packet or not.
1970  *
1971  * Return: TRUE if it is a IPV4 UDP packet
1972  *         FALSE if not
1973  */
1974 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1975 {
1976 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1977 		uint8_t pkt_type;
1978 
1979 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1980 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1981 
1982 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1983 			return true;
1984 		else
1985 			return false;
1986 	} else
1987 		return false;
1988 }
1989 
/**
 * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is an IPv4 TCP packet
 * @data: Pointer to IPv4 TCP packet data buffer
 *
 * This function checks whether the packet is an IPv4 TCP packet.
 *
 * Return: TRUE if it is an IPv4 TCP packet
 *         FALSE if not
 */
1999 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2000 {
2001 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2002 		uint8_t pkt_type;
2003 
2004 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2005 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2006 
2007 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2008 			return true;
2009 		else
2010 			return false;
2011 	} else
2012 		return false;
2013 }
2014 
/**
 * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is an IPv6 UDP packet
 * @data: Pointer to IPv6 UDP packet data buffer
 *
 * This function checks whether the packet is an IPv6 UDP packet.
 *
 * Return: TRUE if it is an IPv6 UDP packet
 *         FALSE if not
 */
2024 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2025 {
2026 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2027 		uint8_t pkt_type;
2028 
2029 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2030 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2031 
2032 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2033 			return true;
2034 		else
2035 			return false;
2036 	} else
2037 		return false;
2038 }
2039 
/**
 * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPv6 TCP packet
 * @data: Pointer to IPv6 TCP packet data buffer
 *
 * This function checks whether the packet is an IPv6 TCP packet.
 *
 * Return: TRUE if it is an IPv6 TCP packet
 *         FALSE if not
 */
2049 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2050 {
2051 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2052 		uint8_t pkt_type;
2053 
2054 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2055 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2056 
2057 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2058 			return true;
2059 		else
2060 			return false;
2061 	} else
2062 		return false;
2063 }
2064 
2065 /**
2066  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
 * @nbuf: sk buff
2068  *
2069  * Return: true if packet is broadcast
2070  *	   false otherwise
2071  */
2072 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2073 {
2074 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2075 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2076 }
2077 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
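
/*
 * Usage sketch (illustrative; example_classify() and the counters are
 * hypothetical): a receive path can chain the classification helpers above
 * to bucket traffic. Each helper re-validates the IP version before reading
 * the address/protocol fields, so the order of the checks is not critical.
 *
 *	static uint32_t bcast_cnt, v4_mcast_cnt, v6_mcast_cnt, v4_tcp_cnt;
 *
 *	static void example_classify(qdf_nbuf_t nbuf)
 *	{
 *		uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *		if (__qdf_nbuf_is_bcast_pkt(nbuf))
 *			bcast_cnt++;
 *		else if (__qdf_nbuf_data_is_ipv4_mcast_pkt(data))
 *			v4_mcast_cnt++;
 *		else if (__qdf_nbuf_data_is_ipv6_mcast_pkt(data))
 *			v6_mcast_cnt++;
 *		else if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data))
 *			v4_tcp_cnt++;
 *	}
 */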
2078 
2079 #ifdef NBUF_MEMORY_DEBUG
2080 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2081 
2082 /**
2083  * struct qdf_nbuf_track_t - Network buffer track structure
2084  *
2085  * @p_next: Pointer to next
2086  * @net_buf: Pointer to network buffer
2087  * @file_name: File name
2088  * @line_num: Line number
2089  * @size: Size
2090  */
2091 struct qdf_nbuf_track_t {
2092 	struct qdf_nbuf_track_t *p_next;
2093 	qdf_nbuf_t net_buf;
2094 	char file_name[QDF_MEM_FILE_NAME_SIZE];
2095 	uint32_t line_num;
2096 	size_t size;
2097 };
2098 
2099 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2100 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2101 
2102 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2103 static struct kmem_cache *nbuf_tracking_cache;
2104 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2105 static spinlock_t qdf_net_buf_track_free_list_lock;
2106 static uint32_t qdf_net_buf_track_free_list_count;
2107 static uint32_t qdf_net_buf_track_used_list_count;
2108 static uint32_t qdf_net_buf_track_max_used;
2109 static uint32_t qdf_net_buf_track_max_free;
2110 static uint32_t qdf_net_buf_track_max_allocated;
2111 
2112 /**
 * update_max_used() - update qdf_net_buf_track_max_used tracking variable
 *
 * Tracks the maximum number of network buffers that the wlan driver was
 * tracking at any one time, as well as the maximum total number of
 * tracking cookies allocated (used + free).
2118  * Return: none
2119  */
2120 static inline void update_max_used(void)
2121 {
2122 	int sum;
2123 
2124 	if (qdf_net_buf_track_max_used <
2125 	    qdf_net_buf_track_used_list_count)
2126 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2127 	sum = qdf_net_buf_track_free_list_count +
2128 		qdf_net_buf_track_used_list_count;
2129 	if (qdf_net_buf_track_max_allocated < sum)
2130 		qdf_net_buf_track_max_allocated = sum;
2131 }
2132 
2133 /**
 * update_max_free() - update qdf_net_buf_track_max_free tracking variable
 *
 * Tracks the maximum number of tracking buffers kept in the freelist.
2137  *
2138  * Return: none
2139  */
2140 static inline void update_max_free(void)
2141 {
2142 	if (qdf_net_buf_track_max_free <
2143 	    qdf_net_buf_track_free_list_count)
2144 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2145 }
2146 
2147 /**
2148  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2149  *
 * This function pulls from the freelist if possible and otherwise uses
 * kmem_cache_alloc. It also adds flexibility to adjust the allocation and
 * freelist schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure, which may not be
 * zeroed.
2155  */
2156 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2157 {
2158 	int flags = GFP_KERNEL;
2159 	unsigned long irq_flag;
2160 	QDF_NBUF_TRACK *new_node = NULL;
2161 
2162 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2163 	qdf_net_buf_track_used_list_count++;
2164 	if (qdf_net_buf_track_free_list != NULL) {
2165 		new_node = qdf_net_buf_track_free_list;
2166 		qdf_net_buf_track_free_list =
2167 			qdf_net_buf_track_free_list->p_next;
2168 		qdf_net_buf_track_free_list_count--;
2169 	}
2170 	update_max_used();
2171 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2172 
2173 	if (new_node != NULL)
2174 		return new_node;
2175 
2176 	if (in_interrupt() || irqs_disabled() || in_atomic())
2177 		flags = GFP_ATOMIC;
2178 
2179 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2180 }
2181 
2182 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2183 #define FREEQ_POOLSIZE 2048
2184 
/**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: nbuf tracking cookie to free
 *
 * Matches calls to qdf_nbuf_track_alloc.
 * Frees the tracking cookie either back to the kernel or onto an internal
 * freelist, based on the current size of the freelist.
 *
 * Return: none
 */
2194 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2195 {
2196 	unsigned long irq_flag;
2197 
2198 	if (!node)
2199 		return;
2200 
	/* Try to shrink the freelist when free_list_count exceeds
	 * FREEQ_POOLSIZE, but only if it is also bigger than twice the number
	 * of nbufs in use. If the driver is stalling in a consistently bursty
	 * fashion, this keeps 3/4 of the allocations on the free list while
	 * still allowing the system to recover memory as traffic calms down.
	 */
2208 
2209 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2210 
2211 	qdf_net_buf_track_used_list_count--;
2212 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2213 	   (qdf_net_buf_track_free_list_count >
2214 	    qdf_net_buf_track_used_list_count << 1)) {
2215 		kmem_cache_free(nbuf_tracking_cache, node);
2216 	} else {
2217 		node->p_next = qdf_net_buf_track_free_list;
2218 		qdf_net_buf_track_free_list = node;
2219 		qdf_net_buf_track_free_list_count++;
2220 	}
2221 	update_max_free();
2222 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2223 }
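
/*
 * Worked example of the shrink policy above (hypothetical counts): with
 * FREEQ_POOLSIZE = 2048, a node is returned to the kernel only when
 * free_list_count > 2048 AND free_list_count > 2 * used_list_count.
 * With 3000 free and 2000 used nodes, 3000 < 4000, so the node stays on
 * the freelist; with 3000 free and 1000 used, 3000 > 2000 and the node
 * goes back via kmem_cache_free().
 */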
2224 
2225 /**
2226  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2227  *
2228  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2229  * the freelist first makes it performant for the first iperf udp burst
2230  * as well as steady state.
2231  *
2232  * Return: None
2233  */
2234 static void qdf_nbuf_track_prefill(void)
2235 {
2236 	int i;
2237 	QDF_NBUF_TRACK *node, *head;
2238 
2239 	/* prepopulate the freelist */
2240 	head = NULL;
2241 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2242 		node = qdf_nbuf_track_alloc();
2243 		if (node == NULL)
2244 			continue;
2245 		node->p_next = head;
2246 		head = node;
2247 	}
2248 	while (head) {
2249 		node = head->p_next;
2250 		qdf_nbuf_track_free(head);
2251 		head = node;
2252 	}
2253 
2254 	/* prefilled buffers should not count as used */
2255 	qdf_net_buf_track_max_used = 0;
2256 }
2257 
2258 /**
 * qdf_nbuf_track_memory_manager_create() - create the nbuf tracking cookie manager
2260  *
2261  * This initializes the memory manager for the nbuf tracking cookies.  Because
2262  * these cookies are all the same size and only used in this feature, we can
2263  * use a kmem_cache to provide tracking as well as to speed up allocations.
2264  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2265  * features) a freelist is prepopulated here.
2266  *
2267  * Return: None
2268  */
2269 static void qdf_nbuf_track_memory_manager_create(void)
2270 {
2271 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2272 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2273 						sizeof(QDF_NBUF_TRACK),
2274 						0, 0, NULL);
2275 
2276 	qdf_nbuf_track_prefill();
2277 }
2278 
2279 /**
 * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking cookie manager
2281  *
2282  * Empty the freelist and print out usage statistics when it is no longer
2283  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2284  * any nbuf tracking cookies were leaked.
2285  *
2286  * Return: None
2287  */
2288 static void qdf_nbuf_track_memory_manager_destroy(void)
2289 {
2290 	QDF_NBUF_TRACK *node, *tmp;
2291 	unsigned long irq_flag;
2292 
2293 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2294 	node = qdf_net_buf_track_free_list;
2295 
2296 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2297 		qdf_print("%s: unexpectedly large max_used count %d",
2298 			  __func__, qdf_net_buf_track_max_used);
2299 
2300 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2301 		qdf_print("%s: %d unused trackers were allocated",
2302 			  __func__,
2303 			  qdf_net_buf_track_max_allocated -
2304 			  qdf_net_buf_track_max_used);
2305 
2306 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2307 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2308 		qdf_print("%s: check freelist shrinking functionality",
2309 			  __func__);
2310 
2311 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2312 		  "%s: %d residual freelist size",
2313 		  __func__, qdf_net_buf_track_free_list_count);
2314 
2315 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2316 		  "%s: %d max freelist size observed",
2317 		  __func__, qdf_net_buf_track_max_free);
2318 
2319 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2320 		  "%s: %d max buffers used observed",
2321 		  __func__, qdf_net_buf_track_max_used);
2322 
2323 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2324 		  "%s: %d max buffers allocated observed",
2325 		  __func__, qdf_net_buf_track_max_allocated);
2326 
2327 	while (node) {
2328 		tmp = node;
2329 		node = node->p_next;
2330 		kmem_cache_free(nbuf_tracking_cache, tmp);
2331 		qdf_net_buf_track_free_list_count--;
2332 	}
2333 
2334 	if (qdf_net_buf_track_free_list_count != 0)
2335 		qdf_info("%d unfreed tracking memory lost in freelist",
2336 			 qdf_net_buf_track_free_list_count);
2337 
2338 	if (qdf_net_buf_track_used_list_count != 0)
2339 		qdf_info("%d unfreed tracking memory still in use",
2340 			 qdf_net_buf_track_used_list_count);
2341 
2342 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2343 	kmem_cache_destroy(nbuf_tracking_cache);
2344 	qdf_net_buf_track_free_list = NULL;
2345 }
2346 
2347 /**
2348  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2349  *
 * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
 * driver in a hash table, and when the driver is unloaded it reports any
 * leaked SKBs. A WLAN driver module whose allocated SKB is freed by the
 * network stack is supposed to call qdf_net_buf_debug_release_skb() so that
 * the SKB is not reported as a memory leak.
2355  *
2356  * Return: none
2357  */
2358 void qdf_net_buf_debug_init(void)
2359 {
2360 	uint32_t i;
2361 
2362 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2363 
2364 	qdf_nbuf_map_tracking_init();
2365 	qdf_nbuf_track_memory_manager_create();
2366 
2367 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2368 		gp_qdf_net_buf_track_tbl[i] = NULL;
2369 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2370 	}
2371 }
2372 qdf_export_symbol(qdf_net_buf_debug_init);
2373 
2374 /**
2375  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2376  *
2377  * Exit network buffer tracking debug functionality and log SKB memory leaks
2378  * As part of exiting the functionality, free the leaked memory and
2379  * cleanup the tracking buffers.
2380  *
2381  * Return: none
2382  */
2383 void qdf_net_buf_debug_exit(void)
2384 {
2385 	uint32_t i;
2386 	uint32_t count = 0;
2387 	unsigned long irq_flag;
2388 	QDF_NBUF_TRACK *p_node;
2389 	QDF_NBUF_TRACK *p_prev;
2390 
2391 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2392 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2393 		p_node = gp_qdf_net_buf_track_tbl[i];
2394 		while (p_node) {
2395 			p_prev = p_node;
2396 			p_node = p_node->p_next;
2397 			count++;
2398 			qdf_info("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK",
2399 				 p_prev->file_name, p_prev->line_num,
2400 				 p_prev->size, p_prev->net_buf);
2401 			qdf_nbuf_track_free(p_prev);
2402 		}
2403 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2404 	}
2405 
2406 	qdf_nbuf_track_memory_manager_destroy();
2407 	qdf_nbuf_map_tracking_deinit();
2408 
2409 #ifdef CONFIG_HALT_KMEMLEAK
2410 	if (count) {
2411 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2412 		QDF_BUG(0);
2413 	}
2414 #endif
2415 }
2416 qdf_export_symbol(qdf_net_buf_debug_exit);
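
/*
 * Lifecycle sketch (illustrative; the hdd_* wrapper names are hypothetical):
 * the tracking tables live for the duration of the driver.
 *
 *	static int hdd_driver_load(void)
 *	{
 *		qdf_net_buf_debug_init();
 *		return 0;
 *	}
 *
 *	static void hdd_driver_unload(void)
 *	{
 *		qdf_net_buf_debug_exit();	(leaked SKBs are logged here)
 *	}
 */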
2417 
/**
 * qdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer to hash
 *
 * Return: hash value
 */
2423 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2424 {
2425 	uint32_t i;
2426 
2427 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2428 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2429 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2430 
2431 	return i;
2432 }
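
/*
 * Worked example (hypothetical pointer value): for net_buf = 0x...12345680,
 * (ptr >> 4) discards the low allocator-alignment bits and (ptr >> 14)
 * folds in higher-order bits; their sum is then masked with
 * QDF_NET_BUF_TRACK_MAX_SIZE - 1 = 0x3FF to select one of 1024 buckets.
 */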
2433 
/**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer to look up
 *
 * Return: pointer to the tracking node if the skb is found in the hash
 *	table, else %NULL
 */
2440 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2441 {
2442 	uint32_t i;
2443 	QDF_NBUF_TRACK *p_node;
2444 
2445 	i = qdf_net_buf_debug_hash(net_buf);
2446 	p_node = gp_qdf_net_buf_track_tbl[i];
2447 
2448 	while (p_node) {
2449 		if (p_node->net_buf == net_buf)
2450 			return p_node;
2451 		p_node = p_node->p_next;
2452 	}
2453 
2454 	return NULL;
2455 }
2456 
/**
 * qdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer to track
 * @size: allocation size
 * @file_name: file where the allocation was made
 * @line_num: line where the allocation was made
 *
 * Return: none
 */
2462 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2463 				uint8_t *file_name, uint32_t line_num)
2464 {
2465 	uint32_t i;
2466 	unsigned long irq_flag;
2467 	QDF_NBUF_TRACK *p_node;
2468 	QDF_NBUF_TRACK *new_node;
2469 
2470 	new_node = qdf_nbuf_track_alloc();
2471 
2472 	i = qdf_net_buf_debug_hash(net_buf);
2473 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2474 
2475 	p_node = qdf_net_buf_debug_look_up(net_buf);
2476 
2477 	if (p_node) {
2478 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2479 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2480 			  net_buf, kbasename(file_name), line_num);
2481 		qdf_nbuf_track_free(new_node);
2482 	} else {
2483 		p_node = new_node;
2484 		if (p_node) {
2485 			p_node->net_buf = net_buf;
2486 			qdf_str_lcopy(p_node->file_name, kbasename(file_name),
2487 				      QDF_MEM_FILE_NAME_SIZE);
2488 			p_node->line_num = line_num;
2489 			p_node->size = size;
2490 			qdf_mem_skb_inc(size);
2491 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2492 			gp_qdf_net_buf_track_tbl[i] = p_node;
2493 		} else
2494 			qdf_print(
2495 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2496 				  kbasename(file_name), line_num, size);
2497 	}
2498 
2499 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2500 }
2501 qdf_export_symbol(qdf_net_buf_debug_add_node);
2502 
/**
 * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer to stop tracking
 *
 * Return: none
 */
2508 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2509 {
2510 	uint32_t i;
2511 	QDF_NBUF_TRACK *p_head;
2512 	QDF_NBUF_TRACK *p_node = NULL;
2513 	unsigned long irq_flag;
2514 	QDF_NBUF_TRACK *p_prev;
2515 
2516 	i = qdf_net_buf_debug_hash(net_buf);
2517 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2518 
2519 	p_head = gp_qdf_net_buf_track_tbl[i];
2520 
2521 	/* Unallocated SKB */
2522 	if (!p_head)
2523 		goto done;
2524 
2525 	p_node = p_head;
2526 	/* Found at head of the table */
2527 	if (p_head->net_buf == net_buf) {
2528 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2529 		goto done;
2530 	}
2531 
2532 	/* Search in collision list */
2533 	while (p_node) {
2534 		p_prev = p_node;
2535 		p_node = p_node->p_next;
2536 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2537 			p_prev->p_next = p_node->p_next;
2538 			break;
2539 		}
2540 	}
2541 
2542 done:
2543 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2544 
2545 	if (p_node) {
2546 		qdf_mem_skb_dec(p_node->size);
2547 		qdf_nbuf_track_free(p_node);
2548 	} else {
2549 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2550 			  net_buf);
2551 		QDF_BUG(0);
2552 	}
2553 }
2554 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2555 
2556 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2557 			uint8_t *file_name, uint32_t line_num)
2558 {
2559 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2560 
2561 	while (ext_list) {
		/*
		 * Take care to add each segment if this is a jumbo packet
		 * chained using frag_list
		 */
2566 		qdf_nbuf_t next;
2567 
2568 		next = qdf_nbuf_queue_next(ext_list);
2569 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2570 		ext_list = next;
2571 	}
2572 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2573 }
2574 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2575 
2576 /**
2577  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2578  * @net_buf: Network buf holding head segment (single)
2579  *
 * A WLAN driver module whose allocated SKB is freed by the network stack is
 * supposed to call this API before returning the SKB to the network stack,
 * so that the SKB is not reported as a memory leak.
2583  *
2584  * Return: none
2585  */
2586 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2587 {
2588 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2589 
2590 	while (ext_list) {
		/*
		 * Take care to free each segment if this is a jumbo packet
		 * chained using frag_list
		 */
2595 		qdf_nbuf_t next;
2596 
2597 		next = qdf_nbuf_queue_next(ext_list);
2598 
2599 		if (qdf_nbuf_is_tso(ext_list) &&
2600 			qdf_nbuf_get_users(ext_list) > 1) {
2601 			ext_list = next;
2602 			continue;
2603 		}
2604 
2605 		qdf_net_buf_debug_delete_node(ext_list);
2606 		ext_list = next;
2607 	}
2608 
2609 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2610 		return;
2611 
2612 	qdf_net_buf_debug_delete_node(net_buf);
2613 }
2614 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2615 
2616 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2617 				int reserve, int align, int prio,
2618 				uint8_t *file, uint32_t line)
2619 {
2620 	qdf_nbuf_t nbuf;
2621 
2622 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio);
2623 
2624 	/* Store SKB in internal QDF tracking table */
2625 	if (qdf_likely(nbuf)) {
2626 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2627 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2628 	}
2629 
2630 	return nbuf;
2631 }
2632 qdf_export_symbol(qdf_nbuf_alloc_debug);
2633 
2634 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2635 {
2636 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2637 		goto free_buf;
2638 
2639 	/* Remove SKB from internal QDF tracking table */
2640 	if (qdf_likely(nbuf)) {
2641 		struct qdf_nbuf_map_metadata *meta;
2642 
2643 		qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
2644 		meta = qdf_nbuf_meta_get(nbuf);
2645 		if (meta)
2646 			QDF_DEBUG_PANIC(
2647 				"Nbuf freed @ %s:%u while mapped from %s:%u",
2648 				kbasename(file), line, meta->file, meta->line);
2649 		qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
2650 
2651 		qdf_net_buf_debug_delete_node(nbuf);
2652 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2653 	}
2654 
2655 free_buf:
2656 	__qdf_nbuf_free(nbuf);
2657 }
2658 qdf_export_symbol(qdf_nbuf_free_debug);
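
/*
 * Callers normally reach the two functions above through wrapper macros
 * that capture the call site; a minimal sketch (the macro names and the
 * reserve/align/prio values are illustrative):
 *
 *	#define my_nbuf_alloc(dev, size) \
 *		qdf_nbuf_alloc_debug(dev, size, 0, 4, 0, \
 *				     (uint8_t *)__FILE__, __LINE__)
 *	#define my_nbuf_free(buf) \
 *		qdf_nbuf_free_debug(buf, (uint8_t *)__FILE__, __LINE__)
 *
 * Only the basename of __FILE__ (see kbasename()) is stored in the
 * tracking node.
 */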
2659 
2660 #endif /* NBUF_MEMORY_DEBUG */
2661 
2662 #if defined(FEATURE_TSO)
2663 
2664 /**
2665  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2666  *
2667  * @ethproto: ethernet type of the msdu
2668  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2669  * @l2_len: L2 length for the msdu
2670  * @eit_hdr: pointer to EIT header
2671  * @eit_hdr_len: EIT header length for the msdu
2672  * @eit_hdr_dma_map_addr: dma addr for EIT header
2673  * @tcphdr: pointer to tcp header
2674  * @ipv4_csum_en: ipv4 checksum enable
2675  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2676  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2677  * @ip_id: IP id
2678  * @tcp_seq_num: TCP sequence number
2679  *
2680  * This structure holds the TSO common info that is common
2681  * across all the TCP segments of the jumbo packet.
2682  */
2683 struct qdf_tso_cmn_seg_info_t {
2684 	uint16_t ethproto;
2685 	uint16_t ip_tcp_hdr_len;
2686 	uint16_t l2_len;
2687 	uint8_t *eit_hdr;
2688 	uint32_t eit_hdr_len;
2689 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2690 	struct tcphdr *tcphdr;
2691 	uint16_t ipv4_csum_en;
2692 	uint16_t tcp_ipv4_csum_en;
2693 	uint16_t tcp_ipv6_csum_en;
2694 	uint16_t ip_id;
2695 	uint32_t tcp_seq_num;
2696 };
2697 
2698 /**
2699  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2700  * information
2701  * @osdev: qdf device handle
2702  * @skb: skb buffer
 * @tso_info: Parameters common to all segments
 *
 * Get the TSO information that is common across all the TCP
 * segments of the jumbo packet
 *
 * Return: 0 on success, 1 on failure
2709  */
2710 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2711 			struct sk_buff *skb,
2712 			struct qdf_tso_cmn_seg_info_t *tso_info)
2713 {
2714 	/* Get ethernet type and ethernet header length */
2715 	tso_info->ethproto = vlan_get_protocol(skb);
2716 
2717 	/* Determine whether this is an IPv4 or IPv6 packet */
2718 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2719 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2720 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2721 
2722 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2723 		tso_info->ipv4_csum_en = 1;
2724 		tso_info->tcp_ipv4_csum_en = 1;
2725 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2726 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2727 				ipv4_hdr->protocol);
2728 			return 1;
2729 		}
2730 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2731 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2732 		tso_info->tcp_ipv6_csum_en = 1;
2733 	} else {
2734 		qdf_err("TSO: ethertype 0x%x is not supported!",
2735 			tso_info->ethproto);
2736 		return 1;
2737 	}
2738 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2739 	tso_info->tcphdr = tcp_hdr(skb);
2740 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2741 	/* get pointer to the ethernet + IP + TCP header and their length */
2742 	tso_info->eit_hdr = skb->data;
2743 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2744 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2745 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2746 							tso_info->eit_hdr,
2747 							tso_info->eit_hdr_len,
2748 							DMA_TO_DEVICE);
2749 	if (unlikely(dma_mapping_error(osdev->dev,
2750 				       tso_info->eit_hdr_dma_map_addr))) {
2751 		qdf_err("DMA mapping error!");
2752 		qdf_assert(0);
2753 		return 1;
2754 	}
2755 
2756 	if (tso_info->ethproto == htons(ETH_P_IP)) {
		/* include IPv4 header length for IPv4 (total length) */
2758 		tso_info->ip_tcp_hdr_len =
2759 			tso_info->eit_hdr_len - tso_info->l2_len;
2760 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2761 		/* exclude IPv6 header length for IPv6 (payload length) */
2762 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2763 	}
2764 	/*
2765 	 * The length of the payload (application layer data) is added to
2766 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2767 	 * descriptor.
2768 	 */
2769 
2770 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2771 		tso_info->tcp_seq_num,
2772 		tso_info->eit_hdr_len,
2773 		tso_info->l2_len,
2774 		skb->len);
2775 	return 0;
2776 }
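
/*
 * Worked example (typical untagged IPv4/TCP frame without options): the
 * EIT (Ethernet + IP + TCP) header that is prepended to every segment is
 *	eit_hdr_len = (transport_header - mac_header) + tcp_hdrlen
 *		    = (14 + 20) + 20 = 54 bytes,
 * so ip_tcp_hdr_len = 54 - l2_len(14) = 40 bytes for IPv4 (total-length
 * style), while for IPv6 only the 20-byte TCP header is counted
 * (payload-length style).
 */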
2777 
2778 
2779 /**
2780  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2781  *
2782  * @curr_seg: Segment whose contents are initialized
 * @tso_cmn_info: Parameters common to all segments
2784  *
2785  * Return: None
2786  */
2787 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2788 				struct qdf_tso_seg_elem_t *curr_seg,
2789 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2790 {
2791 	/* Initialize the flags to 0 */
2792 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2793 
2794 	/*
2795 	 * The following fields remain the same across all segments of
2796 	 * a jumbo packet
2797 	 */
2798 	curr_seg->seg.tso_flags.tso_enable = 1;
2799 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2800 		tso_cmn_info->ipv4_csum_en;
2801 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2802 		tso_cmn_info->tcp_ipv6_csum_en;
2803 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2804 		tso_cmn_info->tcp_ipv4_csum_en;
2805 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2806 
2807 	/* The following fields change for the segments */
2808 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2809 	tso_cmn_info->ip_id++;
2810 
2811 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2812 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2813 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2814 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2815 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2816 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2817 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2818 
2819 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2820 
2821 	/*
2822 	 * First fragment for each segment always contains the ethernet,
2823 	 * IP and TCP header
2824 	 */
2825 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2826 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2827 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2828 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2829 
2830 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2831 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2832 		   tso_cmn_info->eit_hdr_len,
2833 		   curr_seg->seg.tso_flags.tcp_seq_num,
2834 		   curr_seg->seg.total_len);
2835 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2836 }
2837 
2838 /**
2839  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2840  * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
2842  * @tso_info: This is the output. The information about the
2843  *           TSO segments will be populated within this.
2844  *
2845  * This function fragments a TCP jumbo packet into smaller
2846  * segments to be transmitted by the driver. It chains the TSO
2847  * segments created into a list.
2848  *
2849  * Return: number of TSO segments
2850  */
2851 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2852 		struct qdf_tso_info_t *tso_info)
2853 {
2854 	/* common across all segments */
2855 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2856 	/* segment specific */
2857 	void *tso_frag_vaddr;
2858 	qdf_dma_addr_t tso_frag_paddr = 0;
2859 	uint32_t num_seg = 0;
2860 	struct qdf_tso_seg_elem_t *curr_seg;
2861 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2862 	struct skb_frag_struct *frag = NULL;
2863 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2865 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2866 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2867 	int j = 0; /* skb fragment index */
2868 
2869 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2870 
2871 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2872 						skb, &tso_cmn_info))) {
2873 		qdf_warn("TSO: error getting common segment info");
2874 		return 0;
2875 	}
2876 
2877 	total_num_seg = tso_info->tso_num_seg_list;
2878 	curr_seg = tso_info->tso_seg_list;
2879 
2880 	/* length of the first chunk of data in the skb */
2881 	skb_frag_len = skb_headlen(skb);
2882 
2883 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2884 	/* update the remaining skb fragment length and TSO segment length */
2885 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2886 	skb_proc -= tso_cmn_info.eit_hdr_len;
2887 
2888 	/* get the address to the next tso fragment */
2889 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2890 	/* get the length of the next tso fragment */
2891 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2892 
2893 	if (tso_frag_len != 0) {
2894 		tso_frag_paddr = dma_map_single(osdev->dev,
2895 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2896 	}
2897 
2898 	if (unlikely(dma_mapping_error(osdev->dev,
2899 					tso_frag_paddr))) {
2900 		qdf_err("DMA mapping error!");
2901 		qdf_assert(0);
2902 		return 0;
2903 	}
2904 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2905 		__LINE__, skb_frag_len, tso_frag_len);
2906 	num_seg = tso_info->num_segs;
2907 	tso_info->num_segs = 0;
2908 	tso_info->is_tso = 1;
2909 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2910 
2911 	while (num_seg && curr_seg) {
2912 		int i = 1; /* tso fragment index */
2913 		uint8_t more_tso_frags = 1;
2914 
2915 		curr_seg->seg.num_frags = 0;
2916 		tso_info->num_segs++;
2917 		total_num_seg->num_seg.tso_cmn_num_seg++;
2918 
2919 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2920 						 &tso_cmn_info);
2921 
2922 		if (unlikely(skb_proc == 0))
2923 			return tso_info->num_segs;
2924 
2925 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2926 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2927 		/* frag len is added to ip_len in while loop below*/
2928 
2929 		curr_seg->seg.num_frags++;
2930 
2931 		while (more_tso_frags) {
2932 			if (tso_frag_len != 0) {
2933 				curr_seg->seg.tso_frags[i].vaddr =
2934 					tso_frag_vaddr;
2935 				curr_seg->seg.tso_frags[i].length =
2936 					tso_frag_len;
2937 				curr_seg->seg.total_len += tso_frag_len;
2938 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2939 				curr_seg->seg.num_frags++;
2940 				skb_proc = skb_proc - tso_frag_len;
2941 
2942 				/* increment the TCP sequence number */
2943 
2944 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2945 				curr_seg->seg.tso_frags[i].paddr =
2946 					tso_frag_paddr;
2947 			}
2948 
2949 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2950 					__func__, __LINE__,
2951 					i,
2952 					tso_frag_len,
2953 					curr_seg->seg.total_len,
2954 					curr_seg->seg.tso_frags[i].vaddr);
2955 
2956 			/* if there is no more data left in the skb */
2957 			if (!skb_proc)
2958 				return tso_info->num_segs;
2959 
2960 			/* get the next payload fragment information */
2961 			/* check if there are more fragments in this segment */
2962 			if (tso_frag_len < tso_seg_size) {
2963 				more_tso_frags = 1;
2964 				if (tso_frag_len != 0) {
2965 					tso_seg_size = tso_seg_size -
2966 						tso_frag_len;
2967 					i++;
2968 					if (curr_seg->seg.num_frags ==
2969 								FRAG_NUM_MAX) {
2970 						more_tso_frags = 0;
2971 						/*
2972 						 * reset i and the tso
2973 						 * payload size
2974 						 */
2975 						i = 1;
2976 						tso_seg_size =
2977 							skb_shinfo(skb)->
2978 								gso_size;
2979 					}
2980 				}
2981 			} else {
2982 				more_tso_frags = 0;
2983 				/* reset i and the tso payload size */
2984 				i = 1;
2985 				tso_seg_size = skb_shinfo(skb)->gso_size;
2986 			}
2987 
2988 			/* if the next fragment is contiguous */
2989 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
2990 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
2991 				skb_frag_len = skb_frag_len - tso_frag_len;
2992 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2993 
2994 			} else { /* the next fragment is not contiguous */
2995 				if (skb_shinfo(skb)->nr_frags == 0) {
2996 					qdf_info("TSO: nr_frags == 0!");
2997 					qdf_assert(0);
2998 					return 0;
2999 				}
3000 				if (j >= skb_shinfo(skb)->nr_frags) {
3001 					qdf_info("TSO: nr_frags %d j %d",
3002 						 skb_shinfo(skb)->nr_frags, j);
3003 					qdf_assert(0);
3004 					return 0;
3005 				}
3006 				frag = &skb_shinfo(skb)->frags[j];
3007 				skb_frag_len = skb_frag_size(frag);
3008 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3009 				tso_frag_vaddr = skb_frag_address_safe(frag);
3010 				j++;
3011 			}
3012 
3013 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3014 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3015 				tso_seg_size);
3016 
3017 			if (!(tso_frag_vaddr)) {
3018 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3019 						__func__);
3020 				return 0;
3021 			}
3022 
3023 			tso_frag_paddr =
3024 					 dma_map_single(osdev->dev,
3025 						 tso_frag_vaddr,
3026 						 tso_frag_len,
3027 						 DMA_TO_DEVICE);
3028 			if (unlikely(dma_mapping_error(osdev->dev,
3029 							tso_frag_paddr))) {
3030 				qdf_err("DMA mapping error!");
3031 				qdf_assert(0);
3032 				return 0;
3033 			}
3034 		}
3035 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3036 				curr_seg->seg.tso_flags.tcp_seq_num);
3037 		num_seg--;
3038 		/* if TCP FIN flag was set, set it in the last segment */
3039 		if (!num_seg)
3040 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3041 
3042 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3043 		curr_seg = curr_seg->next;
3044 	}
3045 	return tso_info->num_segs;
3046 }
3047 qdf_export_symbol(__qdf_nbuf_get_tso_info);
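
/*
 * Worked example (hypothetical sizes): a linear TSO skb with
 * skb->len = 4434, eit_hdr_len = 54 and gso_size = 1460 carries
 * 4434 - 54 = 4380 payload bytes and is split into 4380 / 1460 = 3
 * segments. Each segment's frag[0] points at the shared 54-byte EIT
 * header and frag[1] at its 1460-byte payload slice, with tcp_seq_num
 * advancing by 1460 from one segment to the next.
 */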
3048 
3049 /**
3050  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3051  *
3052  * @osdev: qdf device handle
3053  * @tso_seg: TSO segment element to be unmapped
3054  * @is_last_seg: whether this is last tso seg or not
3055  *
3056  * Return: none
3057  */
3058 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3059 			  struct qdf_tso_seg_elem_t *tso_seg,
3060 			  bool is_last_seg)
3061 {
3062 	uint32_t num_frags = 0;
3063 
3064 	if (tso_seg->seg.num_frags > 0)
3065 		num_frags = tso_seg->seg.num_frags - 1;
3066 
	/* Num of frags in a tso seg cannot be less than 2 */
3068 	if (num_frags < 1) {
3069 		qdf_assert(0);
3070 		qdf_err("ERROR: num of frags in a tso segment is %d",
3071 			(num_frags + 1));
3072 		return;
3073 	}
3074 
3075 	while (num_frags) {
		/* Do dma unmap of the tso seg except the 0th frag */
3077 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3078 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3079 				num_frags);
3080 			qdf_assert(0);
3081 			return;
3082 		}
3083 		dma_unmap_single(osdev->dev,
3084 				 tso_seg->seg.tso_frags[num_frags].paddr,
3085 				 tso_seg->seg.tso_frags[num_frags].length,
3086 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3087 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3088 		num_frags--;
3089 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3090 	}
3091 
3092 	if (is_last_seg) {
		/* Do dma unmap for the tso seg 0th frag */
3094 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3095 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3096 			qdf_assert(0);
3097 			return;
3098 		}
3099 		dma_unmap_single(osdev->dev,
3100 				 tso_seg->seg.tso_frags[0].paddr,
3101 				 tso_seg->seg.tso_frags[0].length,
3102 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3103 		tso_seg->seg.tso_frags[0].paddr = 0;
3104 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3105 	}
3106 }
3107 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3108 
/**
 * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
 * @skb: network buffer to be segmented
 *
 * This function computes the number of TSO segments that a TCP jumbo
 * packet will be split into by the driver, without performing the
 * segmentation itself.
 *
 * Return: number of TSO segments (0 on failure)
 */
3122 #ifndef BUILD_X86
3123 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3124 {
3125 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3126 	uint32_t remainder, num_segs = 0;
3127 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3128 	uint8_t frags_per_tso = 0;
3129 	uint32_t skb_frag_len = 0;
3130 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3131 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3132 	struct skb_frag_struct *frag = NULL;
3133 	int j = 0;
3134 	uint32_t temp_num_seg = 0;
3135 
	/* length of the first chunk of data in the skb minus eit header */
3137 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3138 
3139 	/* Calculate num of segs for skb's first chunk of data*/
3140 	remainder = skb_frag_len % tso_seg_size;
3141 	num_segs = skb_frag_len / tso_seg_size;
	/*
	 * Remainder non-zero and nr_frags zero implies end of skb data.
	 * In that case, one more tso seg is required to accommodate
	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then the remaining data will be accommodated while doing the
	 * calculation for nr_frags data. Hence, frags_per_tso++.
	 */
3149 	if (remainder) {
3150 		if (!skb_nr_frags)
3151 			num_segs++;
3152 		else
3153 			frags_per_tso++;
3154 	}
3155 
3156 	while (skb_nr_frags) {
3157 		if (j >= skb_shinfo(skb)->nr_frags) {
3158 			qdf_info("TSO: nr_frags %d j %d",
3159 				 skb_shinfo(skb)->nr_frags, j);
3160 			qdf_assert(0);
3161 			return 0;
3162 		}
		/*
		 * Calculate the number of tso segs for the nr_frags data:
		 * Get the length of each frag in skb_frag_len and add it to
		 * the remainder. Get the number of segments by dividing by
		 * tso_seg_size and calculate the new remainder.
		 * Decrement the nr_frags value and keep
		 * looping over all the skb fragments.
		 */
3171 		frag = &skb_shinfo(skb)->frags[j];
3172 		skb_frag_len = skb_frag_size(frag);
3173 		temp_num_seg = num_segs;
3174 		remainder += skb_frag_len;
3175 		num_segs += remainder / tso_seg_size;
3176 		remainder = remainder % tso_seg_size;
3177 		skb_nr_frags--;
3178 		if (remainder) {
3179 			if (num_segs > temp_num_seg)
3180 				frags_per_tso = 0;
			/*
			 * Increment frags_per_tso whenever the remainder is
			 * positive. If frags_per_tso reaches (max - 1)
			 * [the first frag always carries the EIT header,
			 * therefore max - 1], increment num_segs since no
			 * more data can be accommodated in the current tso
			 * seg. Reset the remainder and frags_per_tso and
			 * keep looping.
			 */
3189 			frags_per_tso++;
3190 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3191 				num_segs++;
3192 				frags_per_tso = 0;
3193 				remainder = 0;
3194 			}
			/*
			 * If this is the last skb frag and the remainder is
			 * still non-zero (frags_per_tso has not reached
			 * max - 1), then increment num_segs to take care of
			 * the remaining length.
			 */
3201 			if (!skb_nr_frags && remainder) {
3202 				num_segs++;
3203 				frags_per_tso = 0;
3204 			}
3205 		} else {
			/* Whenever remainder is 0, reset the frags_per_tso. */
3207 			frags_per_tso = 0;
3208 		}
3209 		j++;
3210 	}
3211 
3212 	return num_segs;
3213 }
3214 #else
3215 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3216 {
3217 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3218 	struct skb_frag_struct *frag = NULL;
3219 
	/*
	 * Check if the head SKB or any of the frags are allocated in the
	 * < 0x50000000 region, which cannot be accessed by the Target
	 */
3224 	if (virt_to_phys(skb->data) < 0x50000040) {
3225 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3226 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3227 				virt_to_phys(skb->data));
3228 		goto fail;
3229 
3230 	}
3231 
3232 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3233 		frag = &skb_shinfo(skb)->frags[i];
3234 
3235 		if (!frag)
3236 			goto fail;
3237 
3238 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3239 			goto fail;
3240 	}
3241 
3242 
3243 	gso_size = skb_shinfo(skb)->gso_size;
3244 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3245 			+ tcp_hdrlen(skb));
3246 	while (tmp_len) {
3247 		num_segs++;
3248 		if (tmp_len > gso_size)
3249 			tmp_len -= gso_size;
3250 		else
3251 			break;
3252 	}
3253 
3254 	return num_segs;
3255 
3256 	/*
3257 	 * Do not free this frame, just do socket level accounting
3258 	 * so that this is not reused.
3259 	 */
3260 fail:
3261 	if (skb->sk)
3262 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3263 
3264 	return 0;
3265 }
3266 #endif
3267 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
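
/*
 * Worked example for the non-BUILD_X86 path (hypothetical layout):
 * skb_headlen = 2974 with eit_hdr_len = 54 and tso_seg_size = 1460 gives
 * a linear chunk of 2920 bytes, i.e. num_segs = 2 with remainder 0. If a
 * single 1500-byte page frag follows, num_segs grows by 1500 / 1460 = 1
 * and the final 40-byte remainder adds one more segment, for 4 in total.
 */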
3268 
3269 #endif /* FEATURE_TSO */
3270 
/**
 * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address
 * @lo: pointer filled with the low 32 bits
 * @hi: pointer filled with the high 32 bits
 *
 * Returns the high and low 32 bits of the DMA addr in the provided pointers
 *
 * Return: N/A
 */
3278 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3279 			  uint32_t *lo, uint32_t *hi)
3280 {
3281 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3282 		*lo = lower_32_bits(dmaaddr);
3283 		*hi = upper_32_bits(dmaaddr);
3284 	} else {
3285 		*lo = dmaaddr;
3286 		*hi = 0;
3287 	}
3288 }
3289 
3290 qdf_export_symbol(__qdf_dmaaddr_to_32s);
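
/*
 * Worked example: for a 36-bit address dmaaddr = 0x123456780, the split
 * yields lo = 0x23456780 and hi = 0x1. On builds where qdf_dma_addr_t is
 * only 32 bits wide, hi is always reported as 0.
 */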
3291 
3292 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3293 {
3294 	qdf_nbuf_users_inc(&skb->users);
3295 	return skb;
3296 }
3297 qdf_export_symbol(__qdf_nbuf_inc_users);
3298 
3299 int __qdf_nbuf_get_users(struct sk_buff *skb)
3300 {
3301 	return qdf_nbuf_users_read(&skb->users);
3302 }
3303 qdf_export_symbol(__qdf_nbuf_get_users);
3304 
3305 /**
3306  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3307  * @skb: sk_buff handle
3308  *
3309  * Return: none
 */
void __qdf_nbuf_ref(struct sk_buff *skb)
3313 {
3314 	skb_get(skb);
3315 }
3316 qdf_export_symbol(__qdf_nbuf_ref);
3317 
3318 /**
3319  * __qdf_nbuf_shared() - Check whether the buffer is shared
3320  *  @skb: sk_buff buffer
3321  *
3322  *  Return: true if more than one person has a reference to this buffer.
3323  */
3324 int __qdf_nbuf_shared(struct sk_buff *skb)
3325 {
3326 	return skb_shared(skb);
3327 }
3328 qdf_export_symbol(__qdf_nbuf_shared);
3329 
3330 /**
3331  * __qdf_nbuf_dmamap_create() - create a DMA map.
3332  * @osdev: qdf device handle
3333  * @dmap: dma map handle
3334  *
 * This can later be used to map networking buffers. They:
 * - need space in adf_drv's software descriptor
 * - are typically created during adf_drv_create
 * - need to be created before any API (qdf_nbuf_map) that uses them
3339  *
3340  * Return: QDF STATUS
3341  */
3342 QDF_STATUS
3343 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3344 {
3345 	QDF_STATUS error = QDF_STATUS_SUCCESS;
	/*
	 * The driver can advertise its SG capability; it must be handled
	 * here, and bounce buffers used if they are present.
	 */
3350 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3351 	if (!(*dmap))
3352 		error = QDF_STATUS_E_NOMEM;
3353 
3354 	return error;
3355 }
3356 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3357 /**
3358  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3359  * @osdev: qdf device handle
3360  * @dmap: dma map handle
3361  *
3362  * Return: none
3363  */
3364 void
3365 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3366 {
3367 	kfree(dmap);
3368 }
3369 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
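
/*
 * Usage sketch (illustrative; error handling abbreviated): a map is
 * created once per software descriptor and destroyed along with it.
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (QDF_IS_STATUS_ERROR(__qdf_nbuf_dmamap_create(osdev, &dmap)))
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */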
3370 
3371 /**
3372  * __qdf_nbuf_map_nbytes_single() - map nbytes
3373  * @osdev: os device
3374  * @buf: buffer
3375  * @dir: direction
3376  * @nbytes: number of bytes
3377  *
3378  * Return: QDF_STATUS
3379  */
3380 #ifdef A_SIMOS_DEVHOST
3381 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3382 		qdf_device_t osdev, struct sk_buff *buf,
3383 		 qdf_dma_dir_t dir, int nbytes)
3384 {
3385 	qdf_dma_addr_t paddr;
3386 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)(uintptr_t)buf->data;
3388 	return QDF_STATUS_SUCCESS;
3389 }
3390 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3391 #else
3392 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3393 		qdf_device_t osdev, struct sk_buff *buf,
3394 		 qdf_dma_dir_t dir, int nbytes)
3395 {
3396 	qdf_dma_addr_t paddr;
3397 
3398 	/* assume that the OS only provides a single fragment */
3399 	QDF_NBUF_CB_PADDR(buf) = paddr =
3400 		dma_map_single(osdev->dev, buf->data,
3401 			nbytes, __qdf_dma_dir_to_os(dir));
3402 	return dma_mapping_error(osdev->dev, paddr) ?
3403 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3404 }
3405 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3406 #endif
3407 /**
3408  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3409  * @osdev: os device
3410  * @buf: buffer
3411  * @dir: direction
3412  * @nbytes: number of bytes
3413  *
3414  * Return: none
3415  */
3416 #if defined(A_SIMOS_DEVHOST)
3417 void
3418 __qdf_nbuf_unmap_nbytes_single(
3419 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3420 {
3421 }
3422 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3423 
3424 #else
3425 void
3426 __qdf_nbuf_unmap_nbytes_single(
3427 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3428 {
3429 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3430 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3431 		return;
3432 	}
3433 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3434 			nbytes, __qdf_dma_dir_to_os(dir));
3435 }
3436 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3437 #endif
3438 /**
3439  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3440  * @osdev: os device
3441  * @skb: skb handle
3442  * @dir: dma direction
3443  * @nbytes: number of bytes to be mapped
3444  *
3445  * Return: QDF_STATUS
3446  */
3447 #ifdef QDF_OS_DEBUG
3448 QDF_STATUS
3449 __qdf_nbuf_map_nbytes(
3450 	qdf_device_t osdev,
3451 	struct sk_buff *skb,
3452 	qdf_dma_dir_t dir,
3453 	int nbytes)
3454 {
3455 	struct skb_shared_info  *sh = skb_shinfo(skb);
3456 
3457 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3458 
3459 	/*
3460 	 * Assume there's only a single fragment.
3461 	 * To support multiple fragments, it would be necessary to change
3462 	 * adf_nbuf_t to be a separate object that stores meta-info
3463 	 * (including the bus address for each fragment) and a pointer
3464 	 * to the underlying sk_buff.
3465 	 */
3466 	qdf_assert(sh->nr_frags == 0);
3467 
3468 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3469 }
3470 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3471 #else
3472 QDF_STATUS
3473 __qdf_nbuf_map_nbytes(
3474 	qdf_device_t osdev,
3475 	struct sk_buff *skb,
3476 	qdf_dma_dir_t dir,
3477 	int nbytes)
3478 {
3479 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3480 }
3481 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3482 #endif
3483 /**
3484  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3485  * @osdev: OS device
3486  * @skb: skb handle
3487  * @dir: direction
3488  * @nbytes: number of bytes
3489  *
3490  * Return: none
3491  */
3492 void
3493 __qdf_nbuf_unmap_nbytes(
3494 	qdf_device_t osdev,
3495 	struct sk_buff *skb,
3496 	qdf_dma_dir_t dir,
3497 	int nbytes)
3498 {
3499 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3500 
	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will
	 * catch it.
	 */
3505 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3506 }
3507 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
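
/*
 * Usage sketch (illustrative; hw_tx() is a hypothetical consumer): map
 * before handing the buffer to the device, then unmap with the same
 * direction and length.
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len) !=
 *	    QDF_STATUS_SUCCESS)
 *		return;
 *	hw_tx(QDF_NBUF_CB_PADDR(skb), skb->len);
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */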
3508 
3509 /**
3510  * __qdf_nbuf_dma_map_info() - return the dma map info
3511  * @bmap: dma map
3512  * @sg: dma map info
3513  *
3514  * Return: none
3515  */
3516 void
3517 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3518 {
3519 	qdf_assert(bmap->mapped);
3520 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3521 
3522 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3523 			sizeof(struct __qdf_segment));
3524 	sg->nsegs = bmap->nsegs;
3525 }
3526 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3527 /**
3528  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3529  *			specified by the index
3530  * @skb: sk buff
3531  * @sg: scatter/gather list of all the frags
3532  *
3533  * Return: none
3534  */
3535 #if defined(__QDF_SUPPORT_FRAG_MEM)
void
__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	qdf_assert(skb != NULL);
	sg->sg_segs[0].vaddr = skb->data;
	sg->sg_segs[0].len   = skb->len;
	sg->nsegs            = 1;

	for (i = 1; i <= sh->nr_frags; i++) {
		skb_frag_t    *f        = &sh->frags[i - 1];

		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
			f->page_offset);
		sg->sg_segs[i].len      = f->size;

		qdf_assert(i < QDF_MAX_SGLIST);
	}
	sg->nsegs += sh->nr_frags;
}
3556 qdf_export_symbol(__qdf_nbuf_frag_info);
3557 #else
3558 #ifdef QDF_OS_DEBUG
3559 void
3560 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3561 {
3562 
3563 	struct skb_shared_info  *sh = skb_shinfo(skb);
3564 
3565 	qdf_assert(skb != NULL);
3566 	sg->sg_segs[0].vaddr = skb->data;
3567 	sg->sg_segs[0].len   = skb->len;
3568 	sg->nsegs            = 1;
3569 
3570 	qdf_assert(sh->nr_frags == 0);
3571 }
3572 qdf_export_symbol(__qdf_nbuf_frag_info);
3573 #else
3574 void
3575 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3576 {
3577 	sg->sg_segs[0].vaddr = skb->data;
3578 	sg->sg_segs[0].len   = skb->len;
3579 	sg->nsegs            = 1;
3580 }
3581 qdf_export_symbol(__qdf_nbuf_frag_info);
3582 #endif
3583 #endif
3584 /**
3585  * __qdf_nbuf_get_frag_size() - get frag size
3586  * @nbuf: sk buffer
3587  * @cur_frag: current frag
3588  *
3589  * Return: frag size
3590  */
3591 uint32_t
3592 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3593 {
3594 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3595 	const skb_frag_t *frag = sh->frags + cur_frag;
3596 
3597 	return skb_frag_size(frag);
3598 }
3599 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3600 
3601 /**
3602  * __qdf_nbuf_frag_map() - dma map frag
3603  * @osdev: os device
3604  * @nbuf: sk buff
3605  * @offset: offset
3606  * @dir: direction
3607  * @cur_frag: current fragment
3608  *
3609  * Return: QDF status
3610  */
3611 #ifdef A_SIMOS_DEVHOST
3612 QDF_STATUS __qdf_nbuf_frag_map(
3613 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3614 	int offset, qdf_dma_dir_t dir, int cur_frag)
3615 {
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(nbuf) = paddr = (qdf_dma_addr_t)(uintptr_t)nbuf->data;
3619 	return QDF_STATUS_SUCCESS;
3620 }
3621 qdf_export_symbol(__qdf_nbuf_frag_map);
3622 #else
3623 QDF_STATUS __qdf_nbuf_frag_map(
3624 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3625 	int offset, qdf_dma_dir_t dir, int cur_frag)
3626 {
	dma_addr_t paddr;
	unsigned int frag_len;
3628 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3629 	const skb_frag_t *frag = sh->frags + cur_frag;
3630 
3631 	frag_len = skb_frag_size(frag);
3632 
3633 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3634 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3635 					__qdf_dma_dir_to_os(dir));
3636 	return dma_mapping_error(osdev->dev, paddr) ?
3637 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3638 }
3639 qdf_export_symbol(__qdf_nbuf_frag_map);
3640 #endif
3641 /**
3642  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3643  * @dmap: dma map
3644  * @cb: callback
3645  * @arg: argument
3646  *
3647  * Return: none
3648  */
3649 void
3650 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3651 {
3652 	return;
3653 }
3654 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3655 
3656 
3657 /**
3658  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3659  * @osdev: os device
3660  * @buf: sk buff
3661  * @dir: direction
3662  *
3663  * Return: none
3664  */
3665 #if defined(A_SIMOS_DEVHOST)
3666 static void __qdf_nbuf_sync_single_for_cpu(
3667 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3668 {
3669 	return;
3670 }
3671 #else
3672 static void __qdf_nbuf_sync_single_for_cpu(
3673 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3674 {
3675 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3676 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3677 		return;
3678 	}
3679 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3680 		skb_end_offset(buf) - skb_headroom(buf),
3681 		__qdf_dma_dir_to_os(dir));
3682 }
3683 #endif
3684 /**
3685  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3686  * @osdev: os device
3687  * @skb: sk buff
3688  * @dir: direction
3689  *
3690  * Return: none
3691  */
3692 void
3693 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3694 	struct sk_buff *skb, qdf_dma_dir_t dir)
3695 {
3696 	qdf_assert(
3697 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3698 
	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will
	 * catch it.
	 */
3703 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3704 }
3705 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
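
/*
 * Usage sketch (illustrative; parse_rx() is a hypothetical consumer): for
 * a receive buffer mapped QDF_DMA_FROM_DEVICE, sync before the CPU reads
 * what the device wrote.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	parse_rx(skb->data);
 */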
3706 
3707 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3708 /**
3709  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3710  * @rx_status: Pointer to rx_status.
3711  * @rtap_buf: Buf to which VHT info has to be updated.
3712  * @rtap_len: Current length of radiotap buffer
3713  *
3714  * Return: Length of radiotap after VHT flags updated.
3715  */
3716 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3717 					struct mon_rx_status *rx_status,
3718 					int8_t *rtap_buf,
3719 					uint32_t rtap_len)
3720 {
3721 	uint16_t vht_flags = 0;
3722 
3723 	rtap_len = qdf_align(rtap_len, 2);
3724 
3725 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3726 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3727 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3728 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3729 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3730 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3731 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3732 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3733 	rtap_len += 2;
3734 
3735 	rtap_buf[rtap_len] |=
3736 		(rx_status->is_stbc ?
3737 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3738 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3739 		(rx_status->ldpc ?
3740 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3741 		(rx_status->beamformed ?
3742 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3743 	rtap_len += 1;
3744 	switch (rx_status->vht_flag_values2) {
3745 	case IEEE80211_RADIOTAP_VHT_BW_20:
3746 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3747 		break;
3748 	case IEEE80211_RADIOTAP_VHT_BW_40:
3749 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3750 		break;
3751 	case IEEE80211_RADIOTAP_VHT_BW_80:
3752 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3753 		break;
3754 	case IEEE80211_RADIOTAP_VHT_BW_160:
3755 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3756 		break;
3757 	}
3758 	rtap_len += 1;
3759 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3760 	rtap_len += 1;
3761 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3762 	rtap_len += 1;
3763 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3764 	rtap_len += 1;
3765 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3766 	rtap_len += 1;
3767 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3768 	rtap_len += 1;
3769 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3770 	rtap_len += 1;
3771 	put_unaligned_le16(rx_status->vht_flag_values6,
3772 			   &rtap_buf[rtap_len]);
3773 	rtap_len += 2;
3774 
3775 	return rtap_len;
3776 }
3777 
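/*
 * For reference, the IEEE80211_RADIOTAP_VHT field emitted above occupies
 * 12 bytes once 2-byte aligned:
 *	0-1: known (le16)	2: flags	3: bandwidth
 *	4-7: mcs_nss[4]		8: coding	9: group_id
 *	10-11: partial_aid (le16)
 */
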
3778 /**
3779  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3780  * @rx_status: Pointer to rx_status.
3781  * @rtap_buf: buffer to which radiotap has to be updated
3782  * @rtap_len: radiotap length
3783  *
3784  * API update high-efficiency (11ax) fields in the radiotap header
3785  *
3786  * Return: length of rtap_len updated.
3787  */
3788 static unsigned int
3789 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3790 				     int8_t *rtap_buf, uint32_t rtap_len)
3791 {
3792 	/*
3793 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3794 	 * Enable all "known" HE radiotap flags for now
3795 	 */
3796 	rtap_len = qdf_align(rtap_len, 2);
3797 
3798 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3799 	rtap_len += 2;
3800 
3801 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3802 	rtap_len += 2;
3803 
3804 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3805 	rtap_len += 2;
3806 
3807 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3808 	rtap_len += 2;
3809 
3810 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3811 	rtap_len += 2;
3812 
3813 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3814 	rtap_len += 2;
3815 	qdf_info("he data %x %x %x %x %x %x",
3816 		  rx_status->he_data1,
3817 		  rx_status->he_data2, rx_status->he_data3,
3818 		  rx_status->he_data4, rx_status->he_data5,
3819 		  rx_status->he_data6);
3820 	return rtap_len;
3821 }
3822 
3824 /**
3825  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3826  * @rx_status: Pointer to rx_status.
3827  * @rtap_buf: buffer to which radiotap has to be updated
3828  * @rtap_len: radiotap length
3829  *
3830  * API update HE-MU fields in the radiotap header
3831  *
3832  * Return: length of rtap_len updated.
3833  */
3834 static unsigned int
3835 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3836 				     int8_t *rtap_buf, uint32_t rtap_len)
3837 {
3838 	rtap_len = qdf_align(rtap_len, 2);
3839 
3840 	/*
3841 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3842 	 * Enable all "known" he-mu radiotap flags for now
3843 	 */
3844 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3845 	rtap_len += 2;
3846 
3847 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3848 	rtap_len += 2;
3849 
3850 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3851 	rtap_len += 1;
3852 
3853 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3854 	rtap_len += 1;
3855 
3856 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3857 	rtap_len += 1;
3858 
3859 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3860 	rtap_len += 1;
3861 	qdf_info("he_flags %x %x he-RU %x %x %x %x",
3862 		  rx_status->he_flags1,
3863 		  rx_status->he_flags2, rx_status->he_RU[0],
3864 		  rx_status->he_RU[1], rx_status->he_RU[2],
3865 		  rx_status->he_RU[3]);
3866 
3867 	return rtap_len;
3868 }
3869 
3870 /**
3871  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3872  * @rx_status: Pointer to rx_status.
3873  * @rtap_buf: buffer to which radiotap has to be updated
3874  * @rtap_len: radiotap length
3875  *
3876  * This API updates the HE-MU-other fields in the radiotap header.
3877  *
3878  * Return: updated radiotap buffer length
3879  */
3880 static unsigned int
3881 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3882 				     int8_t *rtap_buf, uint32_t rtap_len)
3883 {
3884 	rtap_len = qdf_align(rtap_len, 2);
3885 
3886 	/*
3887 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3888 	 * Enable all "known" he-mu-other radiotap flags for now
3889 	 */
3890 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3891 	rtap_len += 2;
3892 
3893 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3894 	rtap_len += 2;
3895 
3896 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3897 	rtap_len += 1;
3898 
3899 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3900 	rtap_len += 1;
3901 	qdf_info("he_per_user %x %x pos %x knwn %x",
3902 		  rx_status->he_per_user_1,
3903 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3904 		  rx_status->he_per_user_known);
3905 	return rtap_len;
3906 }
3907 
3909 /*
3910  * Radiotap length budget: RADIOTAP_HEADER_LEN (which already includes
3911  * the mandatory struct ieee80211_radiotap_header) cannot be more than
3912  * the available headroom_sz.
3913  * Increase these lengths when more radiotap elements are added.
3914  * The number after '+' is the maximum possible increase due to alignment.
3915  */
3916 
3917 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3918 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3919 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3920 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3921 #define RADIOTAP_FIXED_HEADER_LEN 17
3922 #define RADIOTAP_HT_FLAGS_LEN 3
3923 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3924 #define RADIOTAP_VENDOR_NS_LEN \
3925 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3926 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3927 				RADIOTAP_FIXED_HEADER_LEN + \
3928 				RADIOTAP_HT_FLAGS_LEN + \
3929 				RADIOTAP_VHT_FLAGS_LEN + \
3930 				RADIOTAP_AMPDU_STATUS_LEN + \
3931 				RADIOTAP_HE_FLAGS_LEN + \
3932 				RADIOTAP_HE_MU_FLAGS_LEN + \
3933 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
3934 				RADIOTAP_VENDOR_NS_LEN)
3935 
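/*
 * Note for callers (sketch): an nbuf handed to qdf_nbuf_update_radiotap()
 * needs at least RADIOTAP_HEADER_LEN bytes of headroom, since the header
 * is staged in a local buffer of that size and then pushed onto the nbuf.
 */
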
3936 #define IEEE80211_RADIOTAP_HE 23
3937 #define IEEE80211_RADIOTAP_HE_MU	24
3938 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3939 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
3940 
3941 /**
3942  * radiotap_num_to_freq() - Get frequency from chan number
3943  * @chan_num: Input channel number
3944  *
3945  * Return: Channel frequency in MHz
3946  */
3947 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3948 {
3949 	if (chan_num == CHANNEL_NUM_14)
3950 		return CHANNEL_FREQ_2484;
3951 	if (chan_num < CHANNEL_NUM_14)
3952 		return CHANNEL_FREQ_2407 +
3953 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3954 
3955 	if (chan_num < CHANNEL_NUM_27)
3956 		return CHANNEL_FREQ_2512 +
3957 			((chan_num - CHANNEL_NUM_15) *
3958 			 FREQ_MULTIPLIER_CONST_20MHZ);
3959 
3960 	if (chan_num > CHANNEL_NUM_182 &&
3961 			chan_num < CHANNEL_NUM_197)
3962 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3963 			CHANNEL_FREQ_4000);
3964 
3965 	return CHANNEL_FREQ_5000 +
3966 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3967 }
3968 
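/*
 * Worked examples for the conversion above:
 *	channel 6  -> 2407 + 6 * 5  = 2437 MHz (2.4 GHz band)
 *	channel 36 -> 5000 + 36 * 5 = 5180 MHz (5 GHz band)
 *	channel 14 -> 2484 MHz (special case)
 */
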
3969 /**
3970  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
3971  * @rx_status: Pointer to rx_status.
3972  * @rtap_buf: buffer to which the AMPDU info is written
3973  * @rtap_len: Current length of radiotap buffer
3974  *
3975  * Return: length of the radiotap buffer after the AMPDU flags are updated
3976  */
3977 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
3978 					struct mon_rx_status *rx_status,
3979 					uint8_t *rtap_buf,
3980 					uint32_t rtap_len)
3981 {
3982 	/*
3983 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
3984 	 * First 32 bits of AMPDU represents the reference number
3985 	 */
3986 
3987 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
3988 	uint16_t ampdu_flags = 0;
3989 	uint16_t ampdu_reserved_flags = 0;
3990 
3991 	rtap_len = qdf_align(rtap_len, 4);
3992 
3993 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
3994 	rtap_len += 4;
3995 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
3996 	rtap_len += 2;
3997 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
3998 	rtap_len += 2;
3999 
4000 	return rtap_len;
4001 }
4002 
4003 /**
4004  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4005  * @rx_status: Pointer to rx_status.
4006  * @nbuf:      nbuf pointer to which radiotap has to be updated
4007  * @headroom_sz: Available headroom size.
4008  *
4009  * Return: length of rtap_len updated.
4010  */
4011 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4012 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4013 {
4014 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4015 	struct ieee80211_radiotap_header *rthdr =
4016 		(struct ieee80211_radiotap_header *)rtap_buf;
4017 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4018 	uint32_t rtap_len = rtap_hdr_len;
4019 	uint8_t length = rtap_len;
4020 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4021 
4022 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4023 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4024 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4025 	rtap_len += 8;
4026 
4027 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4028 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4029 
4030 	if (rx_status->rs_fcs_err)
4031 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4032 
4033 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4034 	rtap_len += 1;
4035 
4036 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4037 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4038 	    !rx_status->he_flags) {
4039 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4040 		rtap_buf[rtap_len] = rx_status->rate;
4041 	} else
4042 		rtap_buf[rtap_len] = 0;
4043 	rtap_len += 1;
4044 
4045 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4046 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4047 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4048 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4049 	rtap_len += 2;
4050 	/* Channel flags. */
4051 	if (rx_status->chan_num > CHANNEL_NUM_35)
4052 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4053 	else
4054 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4055 	if (rx_status->cck_flag)
4056 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4057 	if (rx_status->ofdm_flag)
4058 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4059 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4060 	rtap_len += 2;
4061 
4062 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4063 	 *					(dBm)
4064 	 */
4065 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4066 	/*
4067 	 * rssi_comb is in dB above the noise floor; convert it to dBm by
4068 	 * adding the channel noise floor (nominally -96 dBm).
4069 	 */
4070 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4071 	rtap_len += 1;
4072 
4073 	/* RX signal noise floor */
4074 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4075 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4076 	rtap_len += 1;
4077 
4078 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4079 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4080 	rtap_buf[rtap_len] = rx_status->nr_ant;
4081 	rtap_len += 1;
4082 
4083 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4084 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4085 		return 0;
4086 	}
4087 
4088 	if (rx_status->ht_flags) {
4089 		length = rtap_len;
4090 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4091 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4092 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4093 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4094 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4095 		rtap_len += 1;
4096 
4097 		if (rx_status->sgi)
4098 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4099 		if (rx_status->bw)
4100 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4101 		else
4102 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4103 		rtap_len += 1;
4104 
4105 		rtap_buf[rtap_len] = rx_status->mcs;
4106 		rtap_len += 1;
4107 
4108 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4109 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4110 			return 0;
4111 		}
4112 	}
4113 
4114 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4115 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4116 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4117 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4118 								rtap_buf,
4119 								rtap_len);
4120 	}
4121 
4122 	if (rx_status->vht_flags) {
4123 		length = rtap_len;
4124 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4125 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4126 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4127 								rtap_buf,
4128 								rtap_len);
4129 
4130 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4131 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4132 			return 0;
4133 		}
4134 	}
4135 
4136 	if (rx_status->he_flags) {
4137 		length = rtap_len;
4138 		/* IEEE80211_RADIOTAP_HE */
4139 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4140 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4141 								rtap_buf,
4142 								rtap_len);
4143 
4144 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4145 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4146 			return 0;
4147 		}
4148 	}
4149 
4150 	if (rx_status->he_mu_flags) {
4151 		length = rtap_len;
4152 		/* IEEE80211_RADIOTAP_HE-MU */
4153 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4154 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4155 								rtap_buf,
4156 								rtap_len);
4157 
4158 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4159 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4160 			return 0;
4161 		}
4162 	}
4163 
4164 	if (rx_status->he_mu_other_flags) {
4165 		length = rtap_len;
4166 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4167 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4168 		rtap_len =
4169 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4170 								rtap_buf,
4171 								rtap_len);
4172 
4173 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4174 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4175 			return 0;
4176 		}
4177 	}
4178 
4179 	rtap_len = qdf_align(rtap_len, 2);
4180 	/*
4181 	 * Radiotap Vendor Namespace
4182 	 */
4183 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4184 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4185 					(rtap_buf + rtap_len);
4186 	/* Copy the 3-byte Atheros OUI */
4189 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4190 	/*
4191 	 * Name space selector = 0
4192 	 * We will only have one namespace for now
4193 	 */
4194 	radiotap_vendor_ns_ath->hdr.selector = 0;
4195 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4196 					sizeof(*radiotap_vendor_ns_ath) -
4197 					sizeof(radiotap_vendor_ns_ath->hdr));
4198 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4199 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4200 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4201 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4202 				cpu_to_le32(rx_status->ppdu_timestamp);
4203 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4204 
4205 	rthdr->it_len = cpu_to_le16(rtap_len);
4206 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4207 
4208 	if (headroom_sz < rtap_len) {
4209 		qdf_err("ERROR: not enough space to update radiotap");
4210 		return 0;
4211 	}
4212 	qdf_nbuf_push_head(nbuf, rtap_len);
4213 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4214 	return rtap_len;
4215 }
4216 #else
4217 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4218 					struct mon_rx_status *rx_status,
4219 					int8_t *rtap_buf,
4220 					uint32_t rtap_len)
4221 {
4222 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4223 	return 0;
4224 }
4225 
4226 static unsigned int qdf_nbuf_update_radiotap_he_flags(
4227 	struct mon_rx_status *rx_status, int8_t *rtap_buf, uint32_t rtap_len)
4228 {
4229 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4230 	return 0;
4231 }
4232 
4233 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4234 					struct mon_rx_status *rx_status,
4235 					uint8_t *rtap_buf,
4236 					uint32_t rtap_len)
4237 {
4238 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4239 	return 0;
4240 }
4241 
4242 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4243 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4244 {
4245 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4246 	return 0;
4247 }
4248 #endif
4249 qdf_export_symbol(qdf_nbuf_update_radiotap);
4250 
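/*
 * Caller sketch (hypothetical monitor-mode RX path; 'rx_status' and 'nbuf'
 * are placeholders for the caller's state). A zero return means the
 * radiotap header could not be built or did not fit in the headroom:
 *
 *	rtap_len = qdf_nbuf_update_radiotap(rx_status, nbuf,
 *					    qdf_nbuf_headroom(nbuf));
 *	if (!rtap_len)
 *		qdf_nbuf_free(nbuf);
 */
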
4251 /**
4252  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4253  * @cb_func_ptr: function pointer to the nbuf free callback
4254  *
4255  * This function registers a callback function for nbuf free.
4256  *
4257  * Return: none
4258  */
4259 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4260 {
4261 	nbuf_free_cb = cb_func_ptr;
4262 }
4263 
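/*
 * Example (sketch; 'my_nbuf_free_handler' is a hypothetical callback
 * matching qdf_nbuf_free_t): a component that recycles buffers can hook
 * free events like so.
 *
 *	static void my_nbuf_free_handler(qdf_nbuf_t nbuf) { ... }
 *	...
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_handler);
 */
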
4264 /**
4265  * qdf_nbuf_classify_pkt() - classify packet
4266  * @skb - sk buff
4267  *
4268  * Return: none
4269  */
4270 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4271 {
4272 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4273 
4274 	/* check whether the destination mac address is broadcast/multicast */
4275 	if (is_broadcast_ether_addr(eh->h_dest))
4276 		QDF_NBUF_CB_SET_BCAST(skb);
4277 	else if (is_multicast_ether_addr(eh->h_dest))
4278 		QDF_NBUF_CB_SET_MCAST(skb);
4279 
4280 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4281 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4282 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4283 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4284 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4285 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4286 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4287 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4288 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4289 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4290 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4291 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4292 }
4293 qdf_export_symbol(qdf_nbuf_classify_pkt);
4294 
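/*
 * Typical use (sketch; 'handle_eapol' is hypothetical): classify on the
 * TX enqueue path, then branch on the recorded type, e.g. to prioritize
 * EAPOL frames:
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		handle_eapol(skb);
 */

/**
 * __qdf_nbuf_init() - re-initialize an nbuf for reuse
 * @nbuf: network buffer to reset
 *
 * Return: none
 */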
4295 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4296 {
4297 	qdf_nbuf_users_set(&nbuf->users, 1);
4298 	nbuf->data = nbuf->head + NET_SKB_PAD;
4299 	skb_reset_tail_pointer(nbuf);
4300 }
4301 qdf_export_symbol(__qdf_nbuf_init);
4302 
4303 #ifdef WLAN_FEATURE_FASTPATH
4304 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4305 {
4306 	qdf_nbuf_users_set(&nbuf->users, 1);
4307 	nbuf->data = nbuf->head + NET_SKB_PAD;
4308 	skb_reset_tail_pointer(nbuf);
4309 }
4310 qdf_export_symbol(qdf_nbuf_init_fast);
4311 #endif /* WLAN_FEATURE_FASTPATH */
4312 
4314 #ifdef QDF_NBUF_GLOBAL_COUNT
4315 /**
4316  * __qdf_nbuf_mod_init() - initialization routine for qdf_nbuf
4317  *
4318  * Return: void
4319  */
4320 void __qdf_nbuf_mod_init(void)
4321 {
4322 	qdf_atomic_init(&nbuf_count);
4323 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4324 }
4325 
4326 /**
4327  * __qdf_nbuf_mod_exit() - de-initialization routine for qdf_nbuf
4328  *
4329  * Return: void
4330  */
4331 void __qdf_nbuf_mod_exit(void)
4332 {
4333 }
4334 #endif
4335