xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define FREQ_MULTIPLIER_CONST_5MHZ 5
88 #define FREQ_MULTIPLIER_CONST_20MHZ 20
89 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
90 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
91 #define RADIOTAP_CCK_CHANNEL 0x0020
92 #define RADIOTAP_OFDM_CHANNEL 0x0040
93 
94 #ifdef CONFIG_MCL
95 #include <qdf_mc_timer.h>
96 
97 struct qdf_track_timer {
98 	qdf_mc_timer_t track_timer;
99 	qdf_atomic_t alloc_fail_cnt;
100 };
101 
102 static struct qdf_track_timer alloc_track_timer;
103 
104 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
105 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
106 #endif
107 
108 /* Packet Counter */
109 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
110 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
111 #ifdef QDF_NBUF_GLOBAL_COUNT
112 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
113 static qdf_atomic_t nbuf_count;
114 #endif
115 
116 /**
117  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
118  *
119  * Return: none
120  */
121 void qdf_nbuf_tx_desc_count_display(void)
122 {
123 	qdf_debug("Current Snapshot of the Driver:");
124 	qdf_debug("Data Packets:");
125 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
126 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
127 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
129 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
136 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
144 	qdf_debug("Mgmt Packets:");
145 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
146 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
147 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
159 }
160 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
161 
162 /**
163  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
164  * @packet_type   : packet type either mgmt/data
165  * @current_state : layer at which the packet currently present
166  *
167  * Return: none
168  */
169 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
170 			uint8_t current_state)
171 {
172 	switch (packet_type) {
173 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
174 		nbuf_tx_mgmt[current_state]++;
175 		break;
176 	case QDF_NBUF_TX_PKT_DATA_TRACK:
177 		nbuf_tx_data[current_state]++;
178 		break;
179 	default:
180 		break;
181 	}
182 }
183 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
184 
185 /**
186  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
187  *
188  * Return: none
189  */
190 void qdf_nbuf_tx_desc_count_clear(void)
191 {
192 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
193 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
194 }
195 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
196 
197 /**
198  * qdf_nbuf_set_state() - Updates the packet state
199  * @nbuf:            network buffer
200  * @current_state :  layer at which the packet currently is
201  *
202  * This function updates the packet state to the layer at which the packet
203  * currently is
204  *
205  * Return: none
206  */
207 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
208 {
209 	/*
210 	 * Only Mgmt, Data Packets are tracked. WMI messages
211 	 * such as scan commands are not tracked
212 	 */
213 	uint8_t packet_type;
214 
215 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
216 
217 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
218 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
219 		return;
220 	}
221 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
222 	qdf_nbuf_tx_desc_count_update(packet_type,
223 					current_state);
224 }
225 qdf_export_symbol(qdf_nbuf_set_state);
226 
227 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
228 /**
229  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
230  *
231  * This function starts the alloc fail replenish timer.
232  *
233  * Return: void
234  */
235 static void __qdf_nbuf_start_replenish_timer(void)
236 {
237 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
238 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
239 	    QDF_TIMER_STATE_RUNNING)
240 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
241 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
242 }
243 
244 /**
245  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
246  *
247  * This function stops the alloc fail replenish timer.
248  *
249  * Return: void
250  */
251 static void __qdf_nbuf_stop_replenish_timer(void)
252 {
253 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
254 		return;
255 
256 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
257 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
258 	    QDF_TIMER_STATE_RUNNING)
259 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
260 }
261 
262 /**
263  * qdf_replenish_expire_handler - Replenish expire handler
264  *
265  * This function triggers when the alloc fail replenish timer expires.
266  *
267  * Return: void
268  */
269 static void qdf_replenish_expire_handler(void *arg)
270 {
271 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
272 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
273 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
274 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
275 
276 		/* Error handling here */
277 	}
278 }
279 
280 /**
281  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
282  *
283  * This function initializes the nbuf alloc fail replenish timer.
284  *
285  * Return: void
286  */
287 void __qdf_nbuf_init_replenish_timer(void)
288 {
289 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
290 			  qdf_replenish_expire_handler, NULL);
291 }
292 
293 /**
294  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
295  *
296  * This function deinitializes the nbuf alloc fail replenish timer.
297  *
298  * Return: void
299  */
300 void __qdf_nbuf_deinit_replenish_timer(void)
301 {
302 	__qdf_nbuf_stop_replenish_timer();
303 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
304 }
305 #else
306 
307 static inline void __qdf_nbuf_start_replenish_timer(void) {}
308 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
309 #endif
310 
311 /* globals do not need to be initialized to NULL/0 */
312 qdf_nbuf_trace_update_t qdf_trace_update_cb;
313 qdf_nbuf_free_t nbuf_free_cb;
314 
315 #ifdef QDF_NBUF_GLOBAL_COUNT
316 
317 /**
318  * __qdf_nbuf_count_get() - get nbuf global count
319  *
320  * Return: nbuf global count
321  */
322 int __qdf_nbuf_count_get(void)
323 {
324 	return qdf_atomic_read(&nbuf_count);
325 }
326 qdf_export_symbol(__qdf_nbuf_count_get);
327 
328 /**
329  * __qdf_nbuf_count_inc() - increment nbuf global count
330  *
331  * @buf: sk buff
332  *
333  * Return: void
334  */
335 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
336 {
337 	qdf_atomic_inc(&nbuf_count);
338 }
339 qdf_export_symbol(__qdf_nbuf_count_inc);
340 
341 /**
342  * __qdf_nbuf_count_dec() - decrement nbuf global count
343  *
344  * @buf: sk buff
345  *
346  * Return: void
347  */
348 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
349 {
350 	qdf_atomic_dec(&nbuf_count);
351 }
352 qdf_export_symbol(__qdf_nbuf_count_dec);
353 #endif
354 
355 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
356 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
357 				 int align, int prio, const char *func,
358 				 uint32_t line)
359 {
360 	struct sk_buff *skb;
361 	unsigned long offset;
362 	uint32_t lowmem_alloc_tries = 0;
363 
364 	if (align)
365 		size += (align - 1);
366 
367 realloc:
368 	skb = dev_alloc_skb(size);
369 
370 	if (skb)
371 		goto skb_alloc;
372 
373 	skb = pld_nbuf_pre_alloc(size);
374 
375 	if (!skb) {
376 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
377 				size, func, line);
378 		return NULL;
379 	}
380 
381 skb_alloc:
382 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
383 	 * Though we are trying to reserve low memory upfront to prevent this,
384 	 * we sometimes see SKBs allocated from low memory.
385 	 */
386 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
387 		lowmem_alloc_tries++;
388 		if (lowmem_alloc_tries > 100) {
389 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
390 				     size, func, line);
391 			return NULL;
392 		} else {
393 			/* Not freeing to make sure it
394 			 * will not get allocated again
395 			 */
396 			goto realloc;
397 		}
398 	}
399 	memset(skb->cb, 0x0, sizeof(skb->cb));
400 
401 	/*
402 	 * The default is for netbuf fragments to be interpreted
403 	 * as wordstreams rather than bytestreams.
404 	 */
405 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
406 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
407 
408 	/*
409 	 * XXX:how about we reserve first then align
410 	 * Align & make sure that the tail & data are adjusted properly
411 	 */
412 
413 	if (align) {
414 		offset = ((unsigned long)skb->data) % align;
415 		if (offset)
416 			skb_reserve(skb, align - offset);
417 	}
418 
419 	/*
420 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
421 	 * pointer
422 	 */
423 	skb_reserve(skb, reserve);
424 	qdf_nbuf_count_inc(skb);
425 
426 	return skb;
427 }
428 #else
429 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
430 				 int align, int prio, const char *func,
431 				 uint32_t line)
432 {
433 	struct sk_buff *skb;
434 	unsigned long offset;
435 	int flags = GFP_KERNEL;
436 
437 	if (align)
438 		size += (align - 1);
439 
440 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
441 		flags = GFP_ATOMIC;
442 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
443 		/*
444 		 * Observed that kcompactd burns out CPU to make order-3 page.
445 		 *__netdev_alloc_skb has 4k page fallback option just in case of
446 		 * failing high order page allocation so we don't need to be
447 		 * hard. Make kcompactd rest in piece.
448 		 */
449 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
450 #endif
451 	}
452 
453 	skb = __netdev_alloc_skb(NULL, size, flags);
454 
455 	if (skb)
456 		goto skb_alloc;
457 
458 	skb = pld_nbuf_pre_alloc(size);
459 
460 	if (!skb) {
461 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
462 				size, func, line);
463 		__qdf_nbuf_start_replenish_timer();
464 		return NULL;
465 	} else {
466 		__qdf_nbuf_stop_replenish_timer();
467 	}
468 
469 skb_alloc:
470 	memset(skb->cb, 0x0, sizeof(skb->cb));
471 
472 	/*
473 	 * The default is for netbuf fragments to be interpreted
474 	 * as wordstreams rather than bytestreams.
475 	 */
476 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
477 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
478 
479 	/*
480 	 * XXX:how about we reserve first then align
481 	 * Align & make sure that the tail & data are adjusted properly
482 	 */
483 
484 	if (align) {
485 		offset = ((unsigned long)skb->data) % align;
486 		if (offset)
487 			skb_reserve(skb, align - offset);
488 	}
489 
490 	/*
491 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
492 	 * pointer
493 	 */
494 	skb_reserve(skb, reserve);
495 	qdf_nbuf_count_inc(skb);
496 
497 	return skb;
498 }
499 #endif
500 qdf_export_symbol(__qdf_nbuf_alloc);
501 
502 /**
503  * __qdf_nbuf_free() - free the nbuf its interrupt safe
504  * @skb: Pointer to network buffer
505  *
506  * Return: none
507  */
508 
509 #ifdef CONFIG_MCL
510 void __qdf_nbuf_free(struct sk_buff *skb)
511 {
512 	if (pld_nbuf_pre_alloc_free(skb))
513 		return;
514 
515 	qdf_nbuf_count_dec(skb);
516 	if (nbuf_free_cb)
517 		nbuf_free_cb(skb);
518 	else
519 		dev_kfree_skb_any(skb);
520 }
521 #else
522 void __qdf_nbuf_free(struct sk_buff *skb)
523 {
524 	if (pld_nbuf_pre_alloc_free(skb))
525 		return;
526 
527 	qdf_nbuf_count_dec(skb);
528 	dev_kfree_skb_any(skb);
529 }
530 #endif
531 
532 qdf_export_symbol(__qdf_nbuf_free);
533 
534 #ifdef NBUF_MEMORY_DEBUG
535 enum qdf_nbuf_event_type {
536 	QDF_NBUF_ALLOC,
537 	QDF_NBUF_ALLOC_CLONE,
538 	QDF_NBUF_ALLOC_COPY,
539 	QDF_NBUF_ALLOC_FAILURE,
540 	QDF_NBUF_FREE,
541 	QDF_NBUF_MAP,
542 	QDF_NBUF_UNMAP,
543 };
544 
545 struct qdf_nbuf_event {
546 	qdf_nbuf_t nbuf;
547 	char func[QDF_MEM_FUNC_NAME_SIZE];
548 	uint32_t line;
549 	enum qdf_nbuf_event_type type;
550 	uint64_t timestamp;
551 };
552 
553 #define QDF_NBUF_HISTORY_SIZE 4096
554 static qdf_atomic_t qdf_nbuf_history_index;
555 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
556 
557 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
558 {
559 	int32_t next = qdf_atomic_inc_return(index);
560 
561 	if (next == size)
562 		qdf_atomic_sub(size, index);
563 
564 	return next % size;
565 }
566 
567 static void
568 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
569 		     enum qdf_nbuf_event_type type)
570 {
571 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
572 						   QDF_NBUF_HISTORY_SIZE);
573 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
574 
575 	event->nbuf = nbuf;
576 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
577 	event->line = line;
578 	event->type = type;
579 	event->timestamp = qdf_get_log_timestamp();
580 }
581 #endif /* NBUF_MEMORY_DEBUG */
582 
583 #ifdef NBUF_MAP_UNMAP_DEBUG
584 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
585 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
586 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
587 
588 static void qdf_nbuf_map_tracking_init(void)
589 {
590 	qdf_tracker_init(&qdf_nbuf_map_tracker);
591 }
592 
593 static void qdf_nbuf_map_tracking_deinit(void)
594 {
595 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
596 }
597 
598 static QDF_STATUS
599 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
600 {
601 	QDF_STATUS status;
602 
603 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
604 	if (QDF_IS_STATUS_ERROR(status))
605 		return status;
606 
607 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
608 
609 	return QDF_STATUS_SUCCESS;
610 }
611 
612 static void
613 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
614 {
615 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
616 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
617 }
618 
619 void qdf_nbuf_map_check_for_leaks(void)
620 {
621 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
622 }
623 
624 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
625 			      qdf_nbuf_t buf,
626 			      qdf_dma_dir_t dir,
627 			      const char *func,
628 			      uint32_t line)
629 {
630 	QDF_STATUS status;
631 
632 	status = qdf_nbuf_track_map(buf, func, line);
633 	if (QDF_IS_STATUS_ERROR(status))
634 		return status;
635 
636 	status = __qdf_nbuf_map(osdev, buf, dir);
637 	if (QDF_IS_STATUS_ERROR(status))
638 		qdf_nbuf_untrack_map(buf, func, line);
639 
640 	return status;
641 }
642 
643 qdf_export_symbol(qdf_nbuf_map_debug);
644 
645 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
646 			  qdf_nbuf_t buf,
647 			  qdf_dma_dir_t dir,
648 			  const char *func,
649 			  uint32_t line)
650 {
651 	qdf_nbuf_untrack_map(buf, func, line);
652 	__qdf_nbuf_unmap_single(osdev, buf, dir);
653 }
654 
655 qdf_export_symbol(qdf_nbuf_unmap_debug);
656 
657 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
658 				     qdf_nbuf_t buf,
659 				     qdf_dma_dir_t dir,
660 				     const char *func,
661 				     uint32_t line)
662 {
663 	QDF_STATUS status;
664 
665 	status = qdf_nbuf_track_map(buf, func, line);
666 	if (QDF_IS_STATUS_ERROR(status))
667 		return status;
668 
669 	status = __qdf_nbuf_map_single(osdev, buf, dir);
670 	if (QDF_IS_STATUS_ERROR(status))
671 		qdf_nbuf_untrack_map(buf, func, line);
672 
673 	return status;
674 }
675 
676 qdf_export_symbol(qdf_nbuf_map_single_debug);
677 
678 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
679 				 qdf_nbuf_t buf,
680 				 qdf_dma_dir_t dir,
681 				 const char *func,
682 				 uint32_t line)
683 {
684 	qdf_nbuf_untrack_map(buf, func, line);
685 	__qdf_nbuf_unmap_single(osdev, buf, dir);
686 }
687 
688 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
689 
690 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
691 				     qdf_nbuf_t buf,
692 				     qdf_dma_dir_t dir,
693 				     int nbytes,
694 				     const char *func,
695 				     uint32_t line)
696 {
697 	QDF_STATUS status;
698 
699 	status = qdf_nbuf_track_map(buf, func, line);
700 	if (QDF_IS_STATUS_ERROR(status))
701 		return status;
702 
703 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
704 	if (QDF_IS_STATUS_ERROR(status))
705 		qdf_nbuf_untrack_map(buf, func, line);
706 
707 	return status;
708 }
709 
710 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
711 
712 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
713 				 qdf_nbuf_t buf,
714 				 qdf_dma_dir_t dir,
715 				 int nbytes,
716 				 const char *func,
717 				 uint32_t line)
718 {
719 	qdf_nbuf_untrack_map(buf, func, line);
720 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
721 }
722 
723 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
724 
725 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
726 					    qdf_nbuf_t buf,
727 					    qdf_dma_dir_t dir,
728 					    int nbytes,
729 					    const char *func,
730 					    uint32_t line)
731 {
732 	QDF_STATUS status;
733 
734 	status = qdf_nbuf_track_map(buf, func, line);
735 	if (QDF_IS_STATUS_ERROR(status))
736 		return status;
737 
738 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
739 	if (QDF_IS_STATUS_ERROR(status))
740 		qdf_nbuf_untrack_map(buf, func, line);
741 
742 	return status;
743 }
744 
745 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
746 
747 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
748 					qdf_nbuf_t buf,
749 					qdf_dma_dir_t dir,
750 					int nbytes,
751 					const char *func,
752 					uint32_t line)
753 {
754 	qdf_nbuf_untrack_map(buf, func, line);
755 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
756 }
757 
758 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
759 
760 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
761 					     const char *func,
762 					     uint32_t line)
763 {
764 	char map_func[QDF_TRACKER_FUNC_SIZE];
765 	uint32_t map_line;
766 
767 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
768 				&map_func, &map_line))
769 		return;
770 
771 	QDF_DEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
772 			func, line, map_func, map_line);
773 }
774 #else
775 static inline void qdf_nbuf_map_tracking_init(void)
776 {
777 }
778 
779 static inline void qdf_nbuf_map_tracking_deinit(void)
780 {
781 }
782 
783 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
784 						    const char *func,
785 						    uint32_t line)
786 {
787 }
788 #endif /* NBUF_MAP_UNMAP_DEBUG */
789 
790 /**
791  * __qdf_nbuf_map() - map a buffer to local bus address space
792  * @osdev: OS device
793  * @bmap: Bitmap
794  * @skb: Pointer to network buffer
795  * @dir: Direction
796  *
797  * Return: QDF_STATUS
798  */
799 #ifdef QDF_OS_DEBUG
800 QDF_STATUS
801 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
802 {
803 	struct skb_shared_info *sh = skb_shinfo(skb);
804 
805 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
806 			|| (dir == QDF_DMA_FROM_DEVICE));
807 
808 	/*
809 	 * Assume there's only a single fragment.
810 	 * To support multiple fragments, it would be necessary to change
811 	 * qdf_nbuf_t to be a separate object that stores meta-info
812 	 * (including the bus address for each fragment) and a pointer
813 	 * to the underlying sk_buff.
814 	 */
815 	qdf_assert(sh->nr_frags == 0);
816 
817 	return __qdf_nbuf_map_single(osdev, skb, dir);
818 }
819 qdf_export_symbol(__qdf_nbuf_map);
820 
821 #else
822 QDF_STATUS
823 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
824 {
825 	return __qdf_nbuf_map_single(osdev, skb, dir);
826 }
827 qdf_export_symbol(__qdf_nbuf_map);
828 #endif
829 /**
830  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
831  * @osdev: OS device
832  * @skb: Pointer to network buffer
833  * @dir: dma direction
834  *
835  * Return: none
836  */
837 void
838 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
839 			qdf_dma_dir_t dir)
840 {
841 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
842 		   || (dir == QDF_DMA_FROM_DEVICE));
843 
844 	/*
845 	 * Assume there's a single fragment.
846 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
847 	 */
848 	__qdf_nbuf_unmap_single(osdev, skb, dir);
849 }
850 qdf_export_symbol(__qdf_nbuf_unmap);
851 
852 /**
853  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
854  * @osdev: OS device
855  * @skb: Pointer to network buffer
856  * @dir: Direction
857  *
858  * Return: QDF_STATUS
859  */
860 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
861 QDF_STATUS
862 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
863 {
864 	qdf_dma_addr_t paddr;
865 
866 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
867 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
868 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
869 	return QDF_STATUS_SUCCESS;
870 }
871 qdf_export_symbol(__qdf_nbuf_map_single);
872 #else
873 QDF_STATUS
874 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
875 {
876 	qdf_dma_addr_t paddr;
877 
878 	/* assume that the OS only provides a single fragment */
879 	QDF_NBUF_CB_PADDR(buf) = paddr =
880 		dma_map_single(osdev->dev, buf->data,
881 				skb_end_pointer(buf) - buf->data,
882 				__qdf_dma_dir_to_os(dir));
883 	return dma_mapping_error(osdev->dev, paddr)
884 		? QDF_STATUS_E_FAILURE
885 		: QDF_STATUS_SUCCESS;
886 }
887 qdf_export_symbol(__qdf_nbuf_map_single);
888 #endif
889 /**
890  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
891  * @osdev: OS device
892  * @skb: Pointer to network buffer
893  * @dir: Direction
894  *
895  * Return: none
896  */
897 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
898 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
899 				qdf_dma_dir_t dir)
900 {
901 }
902 #else
903 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
904 					qdf_dma_dir_t dir)
905 {
906 	if (QDF_NBUF_CB_PADDR(buf))
907 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
908 			skb_end_pointer(buf) - buf->data,
909 			__qdf_dma_dir_to_os(dir));
910 }
911 #endif
912 qdf_export_symbol(__qdf_nbuf_unmap_single);
913 
914 /**
915  * __qdf_nbuf_set_rx_cksum() - set rx checksum
916  * @skb: Pointer to network buffer
917  * @cksum: Pointer to checksum value
918  *
919  * Return: QDF_STATUS
920  */
921 QDF_STATUS
922 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
923 {
924 	switch (cksum->l4_result) {
925 	case QDF_NBUF_RX_CKSUM_NONE:
926 		skb->ip_summed = CHECKSUM_NONE;
927 		break;
928 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
929 		skb->ip_summed = CHECKSUM_UNNECESSARY;
930 		break;
931 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
932 		skb->ip_summed = CHECKSUM_PARTIAL;
933 		skb->csum = cksum->val;
934 		break;
935 	default:
936 		pr_err("Unknown checksum type\n");
937 		qdf_assert(0);
938 		return QDF_STATUS_E_NOSUPPORT;
939 	}
940 	return QDF_STATUS_SUCCESS;
941 }
942 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
943 
944 /**
945  * __qdf_nbuf_get_tx_cksum() - get tx checksum
946  * @skb: Pointer to network buffer
947  *
948  * Return: TX checksum value
949  */
950 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
951 {
952 	switch (skb->ip_summed) {
953 	case CHECKSUM_NONE:
954 		return QDF_NBUF_TX_CKSUM_NONE;
955 	case CHECKSUM_PARTIAL:
956 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
957 	case CHECKSUM_COMPLETE:
958 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
959 	default:
960 		return QDF_NBUF_TX_CKSUM_NONE;
961 	}
962 }
963 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
964 
965 /**
966  * __qdf_nbuf_get_tid() - get tid
967  * @skb: Pointer to network buffer
968  *
969  * Return: tid
970  */
971 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
972 {
973 	return skb->priority;
974 }
975 qdf_export_symbol(__qdf_nbuf_get_tid);
976 
977 /**
978  * __qdf_nbuf_set_tid() - set tid
979  * @skb: Pointer to network buffer
980  *
981  * Return: none
982  */
983 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
984 {
985 	skb->priority = tid;
986 }
987 qdf_export_symbol(__qdf_nbuf_set_tid);
988 
989 /**
990  * __qdf_nbuf_set_tid() - set tid
991  * @skb: Pointer to network buffer
992  *
993  * Return: none
994  */
995 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
996 {
997 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
998 }
999 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1000 
1001 /**
1002  * __qdf_nbuf_reg_trace_cb() - register trace callback
1003  * @cb_func_ptr: Pointer to trace callback function
1004  *
1005  * Return: none
1006  */
1007 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1008 {
1009 	qdf_trace_update_cb = cb_func_ptr;
1010 }
1011 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1012 
1013 /**
1014  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1015  *              of DHCP packet.
1016  * @data: Pointer to DHCP packet data buffer
1017  *
1018  * This func. returns the subtype of DHCP packet.
1019  *
1020  * Return: subtype of the DHCP packet.
1021  */
1022 enum qdf_proto_subtype
1023 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1024 {
1025 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1026 
1027 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1028 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1029 					QDF_DHCP_OPTION53_LENGTH)) {
1030 
1031 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1032 		case QDF_DHCP_DISCOVER:
1033 			subtype = QDF_PROTO_DHCP_DISCOVER;
1034 			break;
1035 		case QDF_DHCP_REQUEST:
1036 			subtype = QDF_PROTO_DHCP_REQUEST;
1037 			break;
1038 		case QDF_DHCP_OFFER:
1039 			subtype = QDF_PROTO_DHCP_OFFER;
1040 			break;
1041 		case QDF_DHCP_ACK:
1042 			subtype = QDF_PROTO_DHCP_ACK;
1043 			break;
1044 		case QDF_DHCP_NAK:
1045 			subtype = QDF_PROTO_DHCP_NACK;
1046 			break;
1047 		case QDF_DHCP_RELEASE:
1048 			subtype = QDF_PROTO_DHCP_RELEASE;
1049 			break;
1050 		case QDF_DHCP_INFORM:
1051 			subtype = QDF_PROTO_DHCP_INFORM;
1052 			break;
1053 		case QDF_DHCP_DECLINE:
1054 			subtype = QDF_PROTO_DHCP_DECLINE;
1055 			break;
1056 		default:
1057 			break;
1058 		}
1059 	}
1060 
1061 	return subtype;
1062 }
1063 
1064 /**
1065  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1066  *            of EAPOL packet.
1067  * @data: Pointer to EAPOL packet data buffer
1068  *
1069  * This func. returns the subtype of EAPOL packet.
1070  *
1071  * Return: subtype of the EAPOL packet.
1072  */
1073 enum qdf_proto_subtype
1074 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1075 {
1076 	uint16_t eapol_key_info;
1077 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1078 	uint16_t mask;
1079 
1080 	eapol_key_info = (uint16_t)(*(uint16_t *)
1081 			(data + EAPOL_KEY_INFO_OFFSET));
1082 
1083 	mask = eapol_key_info & EAPOL_MASK;
1084 	switch (mask) {
1085 	case EAPOL_M1_BIT_MASK:
1086 		subtype = QDF_PROTO_EAPOL_M1;
1087 		break;
1088 	case EAPOL_M2_BIT_MASK:
1089 		subtype = QDF_PROTO_EAPOL_M2;
1090 		break;
1091 	case EAPOL_M3_BIT_MASK:
1092 		subtype = QDF_PROTO_EAPOL_M3;
1093 		break;
1094 	case EAPOL_M4_BIT_MASK:
1095 		subtype = QDF_PROTO_EAPOL_M4;
1096 		break;
1097 	default:
1098 		break;
1099 	}
1100 
1101 	return subtype;
1102 }
1103 
1104 /**
1105  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1106  *            of ARP packet.
1107  * @data: Pointer to ARP packet data buffer
1108  *
1109  * This func. returns the subtype of ARP packet.
1110  *
1111  * Return: subtype of the ARP packet.
1112  */
1113 enum qdf_proto_subtype
1114 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1115 {
1116 	uint16_t subtype;
1117 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1118 
1119 	subtype = (uint16_t)(*(uint16_t *)
1120 			(data + ARP_SUB_TYPE_OFFSET));
1121 
1122 	switch (QDF_SWAP_U16(subtype)) {
1123 	case ARP_REQUEST:
1124 		proto_subtype = QDF_PROTO_ARP_REQ;
1125 		break;
1126 	case ARP_RESPONSE:
1127 		proto_subtype = QDF_PROTO_ARP_RES;
1128 		break;
1129 	default:
1130 		break;
1131 	}
1132 
1133 	return proto_subtype;
1134 }
1135 
1136 /**
1137  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1138  *            of IPV4 ICMP packet.
1139  * @data: Pointer to IPV4 ICMP packet data buffer
1140  *
1141  * This func. returns the subtype of ICMP packet.
1142  *
1143  * Return: subtype of the ICMP packet.
1144  */
1145 enum qdf_proto_subtype
1146 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1147 {
1148 	uint8_t subtype;
1149 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1150 
1151 	subtype = (uint8_t)(*(uint8_t *)
1152 			(data + ICMP_SUBTYPE_OFFSET));
1153 
1154 	switch (subtype) {
1155 	case ICMP_REQUEST:
1156 		proto_subtype = QDF_PROTO_ICMP_REQ;
1157 		break;
1158 	case ICMP_RESPONSE:
1159 		proto_subtype = QDF_PROTO_ICMP_RES;
1160 		break;
1161 	default:
1162 		break;
1163 	}
1164 
1165 	return proto_subtype;
1166 }
1167 
1168 /**
1169  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1170  *            of IPV6 ICMPV6 packet.
1171  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1172  *
1173  * This func. returns the subtype of ICMPV6 packet.
1174  *
1175  * Return: subtype of the ICMPV6 packet.
1176  */
1177 enum qdf_proto_subtype
1178 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1179 {
1180 	uint8_t subtype;
1181 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1182 
1183 	subtype = (uint8_t)(*(uint8_t *)
1184 			(data + ICMPV6_SUBTYPE_OFFSET));
1185 
1186 	switch (subtype) {
1187 	case ICMPV6_REQUEST:
1188 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1189 		break;
1190 	case ICMPV6_RESPONSE:
1191 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1192 		break;
1193 	case ICMPV6_RS:
1194 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1195 		break;
1196 	case ICMPV6_RA:
1197 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1198 		break;
1199 	case ICMPV6_NS:
1200 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1201 		break;
1202 	case ICMPV6_NA:
1203 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1204 		break;
1205 	default:
1206 		break;
1207 	}
1208 
1209 	return proto_subtype;
1210 }
1211 
1212 /**
1213  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1214  *            of IPV4 packet.
1215  * @data: Pointer to IPV4 packet data buffer
1216  *
1217  * This func. returns the proto type of IPV4 packet.
1218  *
1219  * Return: proto type of IPV4 packet.
1220  */
1221 uint8_t
1222 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1223 {
1224 	uint8_t proto_type;
1225 
1226 	proto_type = (uint8_t)(*(uint8_t *)(data +
1227 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1228 	return proto_type;
1229 }
1230 
1231 /**
1232  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1233  *            of IPV6 packet.
1234  * @data: Pointer to IPV6 packet data buffer
1235  *
1236  * This func. returns the proto type of IPV6 packet.
1237  *
1238  * Return: proto type of IPV6 packet.
1239  */
1240 uint8_t
1241 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1242 {
1243 	uint8_t proto_type;
1244 
1245 	proto_type = (uint8_t)(*(uint8_t *)(data +
1246 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1247 	return proto_type;
1248 }
1249 
1250 /**
1251  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1252  * @data: Pointer to network data
1253  *
1254  * This api is for Tx packets.
1255  *
1256  * Return: true if packet is ipv4 packet
1257  *	   false otherwise
1258  */
1259 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1260 {
1261 	uint16_t ether_type;
1262 
1263 	ether_type = (uint16_t)(*(uint16_t *)(data +
1264 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1265 
1266 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1267 		return true;
1268 	else
1269 		return false;
1270 }
1271 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1272 
1273 /**
1274  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1275  * @data: Pointer to network data buffer
1276  *
1277  * This api is for ipv4 packet.
1278  *
1279  * Return: true if packet is DHCP packet
1280  *	   false otherwise
1281  */
1282 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1283 {
1284 	uint16_t sport;
1285 	uint16_t dport;
1286 
1287 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1288 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1289 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1290 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1291 					 sizeof(uint16_t)));
1292 
1293 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1294 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1295 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1296 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1297 		return true;
1298 	else
1299 		return false;
1300 }
1301 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1302 
1303 /**
1304  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1305  * @data: Pointer to network data buffer
1306  *
1307  * This api is for ipv4 packet.
1308  *
1309  * Return: true if packet is EAPOL packet
1310  *	   false otherwise.
1311  */
1312 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1313 {
1314 	uint16_t ether_type;
1315 
1316 	ether_type = (uint16_t)(*(uint16_t *)(data +
1317 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1318 
1319 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1320 		return true;
1321 	else
1322 		return false;
1323 }
1324 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1325 
1326 /**
1327  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1328  * @skb: Pointer to network buffer
1329  *
1330  * This api is for ipv4 packet.
1331  *
1332  * Return: true if packet is WAPI packet
1333  *	   false otherwise.
1334  */
1335 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1336 {
1337 	uint16_t ether_type;
1338 
1339 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1340 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1341 
1342 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1343 		return true;
1344 	else
1345 		return false;
1346 }
1347 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1348 
1349 /**
1350  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1351  * @skb: Pointer to network buffer
1352  *
1353  * This api is for ipv4 packet.
1354  *
1355  * Return: true if packet is tdls packet
1356  *	   false otherwise.
1357  */
1358 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1359 {
1360 	uint16_t ether_type;
1361 
1362 	ether_type = *(uint16_t *)(skb->data +
1363 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1364 
1365 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1366 		return true;
1367 	else
1368 		return false;
1369 }
1370 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1371 
1372 /**
1373  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1374  * @data: Pointer to network data buffer
1375  *
1376  * This api is for ipv4 packet.
1377  *
1378  * Return: true if packet is ARP packet
1379  *	   false otherwise.
1380  */
1381 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1382 {
1383 	uint16_t ether_type;
1384 
1385 	ether_type = (uint16_t)(*(uint16_t *)(data +
1386 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1387 
1388 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1389 		return true;
1390 	else
1391 		return false;
1392 }
1393 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1394 
1395 /**
1396  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1397  * @data: Pointer to network data buffer
1398  *
1399  * This api is for ipv4 packet.
1400  *
1401  * Return: true if packet is ARP request
1402  *	   false otherwise.
1403  */
1404 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1405 {
1406 	uint16_t op_code;
1407 
1408 	op_code = (uint16_t)(*(uint16_t *)(data +
1409 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1410 
1411 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1412 		return true;
1413 	return false;
1414 }
1415 
1416 /**
1417  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1418  * @data: Pointer to network data buffer
1419  *
1420  * This api is for ipv4 packet.
1421  *
1422  * Return: true if packet is ARP response
1423  *	   false otherwise.
1424  */
1425 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1426 {
1427 	uint16_t op_code;
1428 
1429 	op_code = (uint16_t)(*(uint16_t *)(data +
1430 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1431 
1432 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1433 		return true;
1434 	return false;
1435 }
1436 
1437 /**
1438  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1439  * @data: Pointer to network data buffer
1440  *
1441  * This api is for ipv4 packet.
1442  *
1443  * Return: ARP packet source IP value.
1444  */
1445 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1446 {
1447 	uint32_t src_ip;
1448 
1449 	src_ip = (uint32_t)(*(uint32_t *)(data +
1450 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1451 
1452 	return src_ip;
1453 }
1454 
1455 /**
1456  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1457  * @data: Pointer to network data buffer
1458  *
1459  * This api is for ipv4 packet.
1460  *
1461  * Return: ARP packet target IP value.
1462  */
1463 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1464 {
1465 	uint32_t tgt_ip;
1466 
1467 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1468 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1469 
1470 	return tgt_ip;
1471 }
1472 
1473 /**
1474  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1475  * @data: Pointer to network data buffer
1476  * @len: length to copy
1477  *
1478  * This api is for dns domain name
1479  *
1480  * Return: dns domain name.
1481  */
1482 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1483 {
1484 	uint8_t *domain_name;
1485 
1486 	domain_name = (uint8_t *)
1487 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1488 	return domain_name;
1489 }
1490 
1491 
1492 /**
1493  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1494  * @data: Pointer to network data buffer
1495  *
1496  * This api is for dns query packet.
1497  *
1498  * Return: true if packet is dns query packet.
1499  *	   false otherwise.
1500  */
1501 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1502 {
1503 	uint16_t op_code;
1504 	uint16_t tgt_port;
1505 
1506 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1507 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1508 	/* Standard DNS query always happen on Dest Port 53. */
1509 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1510 		op_code = (uint16_t)(*(uint16_t *)(data +
1511 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1512 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1513 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1514 			return true;
1515 	}
1516 	return false;
1517 }
1518 
1519 /**
1520  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1521  * @data: Pointer to network data buffer
1522  *
1523  * This api is for dns query response.
1524  *
1525  * Return: true if packet is dns response packet.
1526  *	   false otherwise.
1527  */
1528 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1529 {
1530 	uint16_t op_code;
1531 	uint16_t src_port;
1532 
1533 	src_port = (uint16_t)(*(uint16_t *)(data +
1534 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1535 	/* Standard DNS response always comes on Src Port 53. */
1536 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1537 		op_code = (uint16_t)(*(uint16_t *)(data +
1538 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1539 
1540 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1541 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1542 			return true;
1543 	}
1544 	return false;
1545 }
1546 
1547 /**
1548  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1549  * @data: Pointer to network data buffer
1550  *
1551  * This api is for tcp syn packet.
1552  *
1553  * Return: true if packet is tcp syn packet.
1554  *	   false otherwise.
1555  */
1556 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1557 {
1558 	uint8_t op_code;
1559 
1560 	op_code = (uint8_t)(*(uint8_t *)(data +
1561 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1562 
1563 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1564 		return true;
1565 	return false;
1566 }
1567 
1568 /**
1569  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1570  * @data: Pointer to network data buffer
1571  *
1572  * This api is for tcp syn ack packet.
1573  *
1574  * Return: true if packet is tcp syn ack packet.
1575  *	   false otherwise.
1576  */
1577 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1578 {
1579 	uint8_t op_code;
1580 
1581 	op_code = (uint8_t)(*(uint8_t *)(data +
1582 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1583 
1584 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1585 		return true;
1586 	return false;
1587 }
1588 
1589 /**
1590  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1591  * @data: Pointer to network data buffer
1592  *
1593  * This api is for tcp ack packet.
1594  *
1595  * Return: true if packet is tcp ack packet.
1596  *	   false otherwise.
1597  */
1598 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1599 {
1600 	uint8_t op_code;
1601 
1602 	op_code = (uint8_t)(*(uint8_t *)(data +
1603 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1604 
1605 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1606 		return true;
1607 	return false;
1608 }
1609 
1610 /**
1611  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1612  * @data: Pointer to network data buffer
1613  *
1614  * This api is for tcp packet.
1615  *
1616  * Return: tcp source port value.
1617  */
1618 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1619 {
1620 	uint16_t src_port;
1621 
1622 	src_port = (uint16_t)(*(uint16_t *)(data +
1623 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1624 
1625 	return src_port;
1626 }
1627 
1628 /**
1629  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1630  * @data: Pointer to network data buffer
1631  *
1632  * This api is for tcp packet.
1633  *
1634  * Return: tcp destination port value.
1635  */
1636 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1637 {
1638 	uint16_t tgt_port;
1639 
1640 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1641 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1642 
1643 	return tgt_port;
1644 }
1645 
1646 /**
1647  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1648  * @data: Pointer to network data buffer
1649  *
1650  * This api is for ipv4 req packet.
1651  *
1652  * Return: true if packet is icmpv4 request
1653  *	   false otherwise.
1654  */
1655 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1656 {
1657 	uint8_t op_code;
1658 
1659 	op_code = (uint8_t)(*(uint8_t *)(data +
1660 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1661 
1662 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1663 		return true;
1664 	return false;
1665 }
1666 
1667 /**
1668  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1669  * @data: Pointer to network data buffer
1670  *
1671  * This api is for ipv4 res packet.
1672  *
1673  * Return: true if packet is icmpv4 response
1674  *	   false otherwise.
1675  */
1676 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1677 {
1678 	uint8_t op_code;
1679 
1680 	op_code = (uint8_t)(*(uint8_t *)(data +
1681 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1682 
1683 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1684 		return true;
1685 	return false;
1686 }
1687 
1688 /**
1689  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1690  * @data: Pointer to network data buffer
1691  *
1692  * This api is for ipv4 packet.
1693  *
1694  * Return: icmpv4 packet source IP value.
1695  */
1696 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1697 {
1698 	uint32_t src_ip;
1699 
1700 	src_ip = (uint32_t)(*(uint32_t *)(data +
1701 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1702 
1703 	return src_ip;
1704 }
1705 
1706 /**
1707  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1708  * @data: Pointer to network data buffer
1709  *
1710  * This api is for ipv4 packet.
1711  *
1712  * Return: icmpv4 packet target IP value.
1713  */
1714 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1715 {
1716 	uint32_t tgt_ip;
1717 
1718 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1719 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1720 
1721 	return tgt_ip;
1722 }
1723 
1724 
1725 /**
1726  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1727  * @data: Pointer to IPV6 packet data buffer
1728  *
1729  * This func. checks whether it is a IPV6 packet or not.
1730  *
1731  * Return: TRUE if it is a IPV6 packet
1732  *         FALSE if not
1733  */
1734 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1735 {
1736 	uint16_t ether_type;
1737 
1738 	ether_type = (uint16_t)(*(uint16_t *)(data +
1739 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1740 
1741 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1742 		return true;
1743 	else
1744 		return false;
1745 }
1746 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1747 
1748 /**
1749  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1750  * @data: Pointer to network data buffer
1751  *
1752  * This api is for ipv6 packet.
1753  *
1754  * Return: true if packet is DHCP packet
1755  *	   false otherwise
1756  */
1757 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1758 {
1759 	uint16_t sport;
1760 	uint16_t dport;
1761 
1762 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1763 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1764 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1765 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1766 					sizeof(uint16_t));
1767 
1768 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1769 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1770 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1771 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1772 		return true;
1773 	else
1774 		return false;
1775 }
1776 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1777 
1778 /**
1779  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1780  * @data: Pointer to IPV4 packet data buffer
1781  *
1782  * This func. checks whether it is a IPV4 multicast packet or not.
1783  *
1784  * Return: TRUE if it is a IPV4 multicast packet
1785  *         FALSE if not
1786  */
1787 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1788 {
1789 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1790 		uint32_t *dst_addr =
1791 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1792 
1793 		/*
1794 		 * Check first word of the IPV4 address and if it is
1795 		 * equal to 0xE then it represents multicast IP.
1796 		 */
1797 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1798 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1799 			return true;
1800 		else
1801 			return false;
1802 	} else
1803 		return false;
1804 }
1805 
1806 /**
1807  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1808  * @data: Pointer to IPV6 packet data buffer
1809  *
1810  * This func. checks whether it is a IPV6 multicast packet or not.
1811  *
1812  * Return: TRUE if it is a IPV6 multicast packet
1813  *         FALSE if not
1814  */
1815 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1816 {
1817 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1818 		uint16_t *dst_addr;
1819 
1820 		dst_addr = (uint16_t *)
1821 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1822 
1823 		/*
1824 		 * Check first byte of the IP address and if it
1825 		 * 0xFF00 then it is a IPV6 mcast packet.
1826 		 */
1827 		if (*dst_addr ==
1828 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1829 			return true;
1830 		else
1831 			return false;
1832 	} else
1833 		return false;
1834 }
1835 
1836 /**
1837  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1838  * @data: Pointer to IPV4 ICMP packet data buffer
1839  *
1840  * This func. checks whether it is a ICMP packet or not.
1841  *
1842  * Return: TRUE if it is a ICMP packet
1843  *         FALSE if not
1844  */
1845 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1846 {
1847 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1848 		uint8_t pkt_type;
1849 
1850 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1851 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1852 
1853 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1854 			return true;
1855 		else
1856 			return false;
1857 	} else
1858 		return false;
1859 }
1860 
1861 /**
1862  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1863  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1864  *
1865  * This func. checks whether it is a ICMPV6 packet or not.
1866  *
1867  * Return: TRUE if it is a ICMPV6 packet
1868  *         FALSE if not
1869  */
1870 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1871 {
1872 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1873 		uint8_t pkt_type;
1874 
1875 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1876 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1877 
1878 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1879 			return true;
1880 		else
1881 			return false;
1882 	} else
1883 		return false;
1884 }
1885 
1886 /**
1887  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1888  * @data: Pointer to IPV4 UDP packet data buffer
1889  *
1890  * This func. checks whether it is a IPV4 UDP packet or not.
1891  *
1892  * Return: TRUE if it is a IPV4 UDP packet
1893  *         FALSE if not
1894  */
1895 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1896 {
1897 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1898 		uint8_t pkt_type;
1899 
1900 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1901 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1902 
1903 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1904 			return true;
1905 		else
1906 			return false;
1907 	} else
1908 		return false;
1909 }
1910 
1911 /**
1912  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1913  * @data: Pointer to IPV4 TCP packet data buffer
1914  *
1915  * This func. checks whether it is a IPV4 TCP packet or not.
1916  *
1917  * Return: TRUE if it is a IPV4 TCP packet
1918  *         FALSE if not
1919  */
1920 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1921 {
1922 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1923 		uint8_t pkt_type;
1924 
1925 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1926 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1927 
1928 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1929 			return true;
1930 		else
1931 			return false;
1932 	} else
1933 		return false;
1934 }
1935 
1936 /**
1937  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
1938  * @data: Pointer to IPV6 UDP packet data buffer
1939  *
1940  * This func. checks whether it is a IPV6 UDP packet or not.
1941  *
1942  * Return: TRUE if it is a IPV6 UDP packet
1943  *         FALSE if not
1944  */
1945 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
1946 {
1947 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1948 		uint8_t pkt_type;
1949 
1950 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1951 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1952 
1953 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1954 			return true;
1955 		else
1956 			return false;
1957 	} else
1958 		return false;
1959 }
1960 
1961 /**
1962  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
1963  * @data: Pointer to IPV6 TCP packet data buffer
1964  *
1965  * This func. checks whether it is a IPV6 TCP packet or not.
1966  *
1967  * Return: TRUE if it is a IPV6 TCP packet
1968  *         FALSE if not
1969  */
1970 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
1971 {
1972 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1973 		uint8_t pkt_type;
1974 
1975 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1976 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1977 
1978 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1979 			return true;
1980 		else
1981 			return false;
1982 	} else
1983 		return false;
1984 }
1985 
1986 /**
1987  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
1988  * @nbuf - sk buff
1989  *
1990  * Return: true if packet is broadcast
1991  *	   false otherwise
1992  */
1993 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
1994 {
1995 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
1996 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
1997 }
1998 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
1999 
2000 #ifdef NBUF_MEMORY_DEBUG
2001 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2002 
2003 /**
2004  * struct qdf_nbuf_track_t - Network buffer track structure
2005  *
2006  * @p_next: Pointer to next
2007  * @net_buf: Pointer to network buffer
2008  * @func_name: Function name
2009  * @line_num: Line number
2010  * @size: Size
2011  */
2012 struct qdf_nbuf_track_t {
2013 	struct qdf_nbuf_track_t *p_next;
2014 	qdf_nbuf_t net_buf;
2015 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2016 	uint32_t line_num;
2017 	size_t size;
2018 };
2019 
2020 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2021 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2022 
2023 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2024 static struct kmem_cache *nbuf_tracking_cache;
2025 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2026 static spinlock_t qdf_net_buf_track_free_list_lock;
2027 static uint32_t qdf_net_buf_track_free_list_count;
2028 static uint32_t qdf_net_buf_track_used_list_count;
2029 static uint32_t qdf_net_buf_track_max_used;
2030 static uint32_t qdf_net_buf_track_max_free;
2031 static uint32_t qdf_net_buf_track_max_allocated;
2032 
2033 /**
2034  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2035  *
2036  * tracks the max number of network buffers that the wlan driver was tracking
2037  * at any one time.
2038  *
2039  * Return: none
2040  */
2041 static inline void update_max_used(void)
2042 {
2043 	int sum;
2044 
2045 	if (qdf_net_buf_track_max_used <
2046 	    qdf_net_buf_track_used_list_count)
2047 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2048 	sum = qdf_net_buf_track_free_list_count +
2049 		qdf_net_buf_track_used_list_count;
2050 	if (qdf_net_buf_track_max_allocated < sum)
2051 		qdf_net_buf_track_max_allocated = sum;
2052 }
2053 
2054 /**
2055  * update_max_free() - update qdf_net_buf_track_free_list_count
2056  *
2057  * tracks the max number tracking buffers kept in the freelist.
2058  *
2059  * Return: none
2060  */
2061 static inline void update_max_free(void)
2062 {
2063 	if (qdf_net_buf_track_max_free <
2064 	    qdf_net_buf_track_free_list_count)
2065 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2066 }
2067 
2068 /**
2069  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2070  *
2071  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2072  * This function also ads fexibility to adjust the allocation and freelist
2073  * scheems.
2074  *
2075  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2076  */
2077 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2078 {
2079 	int flags = GFP_KERNEL;
2080 	unsigned long irq_flag;
2081 	QDF_NBUF_TRACK *new_node = NULL;
2082 
2083 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2084 	qdf_net_buf_track_used_list_count++;
2085 	if (qdf_net_buf_track_free_list) {
2086 		new_node = qdf_net_buf_track_free_list;
2087 		qdf_net_buf_track_free_list =
2088 			qdf_net_buf_track_free_list->p_next;
2089 		qdf_net_buf_track_free_list_count--;
2090 	}
2091 	update_max_used();
2092 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2093 
2094 	if (new_node)
2095 		return new_node;
2096 
2097 	if (in_interrupt() || irqs_disabled() || in_atomic())
2098 		flags = GFP_ATOMIC;
2099 
2100 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2101 }
2102 
2103 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2104 #define FREEQ_POOLSIZE 2048
2105 
2106 /**
2107  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2108  *
2109  * Matches calls to qdf_nbuf_track_alloc.
2110  * Either frees the tracking cookie to kernel or an internal
2111  * freelist based on the size of the freelist.
2112  *
2113  * Return: none
2114  */
2115 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2116 {
2117 	unsigned long irq_flag;
2118 
2119 	if (!node)
2120 		return;
2121 
2122 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2123 	 * only shrink the freelist if it is bigger than twice the number of
2124 	 * nbufs in use. If the driver is stalling in a consistent bursty
2125 	 * fasion, this will keep 3/4 of thee allocations from the free list
2126 	 * while also allowing the system to recover memory as less frantic
2127 	 * traffic occurs.
2128 	 */
2129 
2130 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2131 
2132 	qdf_net_buf_track_used_list_count--;
2133 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2134 	   (qdf_net_buf_track_free_list_count >
2135 	    qdf_net_buf_track_used_list_count << 1)) {
2136 		kmem_cache_free(nbuf_tracking_cache, node);
2137 	} else {
2138 		node->p_next = qdf_net_buf_track_free_list;
2139 		qdf_net_buf_track_free_list = node;
2140 		qdf_net_buf_track_free_list_count++;
2141 	}
2142 	update_max_free();
2143 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2144 }
2145 
2146 /**
2147  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2148  *
2149  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2150  * the freelist first makes it performant for the first iperf udp burst
2151  * as well as steady state.
2152  *
2153  * Return: None
2154  */
2155 static void qdf_nbuf_track_prefill(void)
2156 {
2157 	int i;
2158 	QDF_NBUF_TRACK *node, *head;
2159 
2160 	/* prepopulate the freelist */
2161 	head = NULL;
2162 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2163 		node = qdf_nbuf_track_alloc();
2164 		if (!node)
2165 			continue;
2166 		node->p_next = head;
2167 		head = node;
2168 	}
2169 	while (head) {
2170 		node = head->p_next;
2171 		qdf_nbuf_track_free(head);
2172 		head = node;
2173 	}
2174 
2175 	/* prefilled buffers should not count as used */
2176 	qdf_net_buf_track_max_used = 0;
2177 }
2178 
2179 /**
2180  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2181  *
2182  * This initializes the memory manager for the nbuf tracking cookies.  Because
2183  * these cookies are all the same size and only used in this feature, we can
2184  * use a kmem_cache to provide tracking as well as to speed up allocations.
2185  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2186  * features) a freelist is prepopulated here.
2187  *
2188  * Return: None
2189  */
2190 static void qdf_nbuf_track_memory_manager_create(void)
2191 {
2192 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2193 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2194 						sizeof(QDF_NBUF_TRACK),
2195 						0, 0, NULL);
2196 
2197 	qdf_nbuf_track_prefill();
2198 }
2199 
2200 /**
2201  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2202  *
2203  * Empty the freelist and print out usage statistics when it is no longer
2204  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2205  * any nbuf tracking cookies were leaked.
2206  *
2207  * Return: None
2208  */
2209 static void qdf_nbuf_track_memory_manager_destroy(void)
2210 {
2211 	QDF_NBUF_TRACK *node, *tmp;
2212 	unsigned long irq_flag;
2213 
2214 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2215 	node = qdf_net_buf_track_free_list;
2216 
2217 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2218 		qdf_print("%s: unexpectedly large max_used count %d",
2219 			  __func__, qdf_net_buf_track_max_used);
2220 
2221 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2222 		qdf_print("%s: %d unused trackers were allocated",
2223 			  __func__,
2224 			  qdf_net_buf_track_max_allocated -
2225 			  qdf_net_buf_track_max_used);
2226 
2227 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2228 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2229 		qdf_print("%s: check freelist shrinking functionality",
2230 			  __func__);
2231 
2232 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2233 		  "%s: %d residual freelist size",
2234 		  __func__, qdf_net_buf_track_free_list_count);
2235 
2236 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2237 		  "%s: %d max freelist size observed",
2238 		  __func__, qdf_net_buf_track_max_free);
2239 
2240 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2241 		  "%s: %d max buffers used observed",
2242 		  __func__, qdf_net_buf_track_max_used);
2243 
2244 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2245 		  "%s: %d max buffers allocated observed",
2246 		  __func__, qdf_net_buf_track_max_allocated);
2247 
2248 	while (node) {
2249 		tmp = node;
2250 		node = node->p_next;
2251 		kmem_cache_free(nbuf_tracking_cache, tmp);
2252 		qdf_net_buf_track_free_list_count--;
2253 	}
2254 
2255 	if (qdf_net_buf_track_free_list_count != 0)
2256 		qdf_info("%d unfreed tracking memory lost in freelist",
2257 			 qdf_net_buf_track_free_list_count);
2258 
2259 	if (qdf_net_buf_track_used_list_count != 0)
2260 		qdf_info("%d unfreed tracking memory still in use",
2261 			 qdf_net_buf_track_used_list_count);
2262 
2263 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2264 	kmem_cache_destroy(nbuf_tracking_cache);
2265 	qdf_net_buf_track_free_list = NULL;
2266 }
2267 
2268 /**
2269  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2270  *
2271  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2272  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2273  * WLAN driver module whose allocated SKB is freed by network stack are
2274  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2275  * reported as memory leak.
2276  *
2277  * Return: none
2278  */
2279 void qdf_net_buf_debug_init(void)
2280 {
2281 	uint32_t i;
2282 
2283 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2284 
2285 	qdf_nbuf_map_tracking_init();
2286 	qdf_nbuf_track_memory_manager_create();
2287 
2288 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2289 		gp_qdf_net_buf_track_tbl[i] = NULL;
2290 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2291 	}
2292 }
2293 qdf_export_symbol(qdf_net_buf_debug_init);
2294 
2295 /**
2296  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2297  *
2298  * Exit network buffer tracking debug functionality and log SKB memory leaks
2299  * As part of exiting the functionality, free the leaked memory and
2300  * cleanup the tracking buffers.
2301  *
2302  * Return: none
2303  */
2304 void qdf_net_buf_debug_exit(void)
2305 {
2306 	uint32_t i;
2307 	uint32_t count = 0;
2308 	unsigned long irq_flag;
2309 	QDF_NBUF_TRACK *p_node;
2310 	QDF_NBUF_TRACK *p_prev;
2311 
2312 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2313 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2314 		p_node = gp_qdf_net_buf_track_tbl[i];
2315 		while (p_node) {
2316 			p_prev = p_node;
2317 			p_node = p_node->p_next;
2318 			count++;
2319 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2320 				 p_prev->func_name, p_prev->line_num,
2321 				 p_prev->size, p_prev->net_buf);
2322 			qdf_nbuf_track_free(p_prev);
2323 		}
2324 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2325 	}
2326 
2327 	qdf_nbuf_track_memory_manager_destroy();
2328 	qdf_nbuf_map_tracking_deinit();
2329 
2330 #ifdef CONFIG_HALT_KMEMLEAK
2331 	if (count) {
2332 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2333 		QDF_BUG(0);
2334 	}
2335 #endif
2336 }
2337 qdf_export_symbol(qdf_net_buf_debug_exit);
2338 
2339 /**
2340  * qdf_net_buf_debug_hash() - hash network buffer pointer
2341  *
2342  * Return: hash value
2343  */
2344 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2345 {
2346 	uint32_t i;
2347 
2348 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2349 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2350 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2351 
2352 	return i;
2353 }
2354 
2355 /**
2356  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2357  *
2358  * Return: If skb is found in hash table then return pointer to network buffer
2359  *	else return %NULL
2360  */
2361 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2362 {
2363 	uint32_t i;
2364 	QDF_NBUF_TRACK *p_node;
2365 
2366 	i = qdf_net_buf_debug_hash(net_buf);
2367 	p_node = gp_qdf_net_buf_track_tbl[i];
2368 
2369 	while (p_node) {
2370 		if (p_node->net_buf == net_buf)
2371 			return p_node;
2372 		p_node = p_node->p_next;
2373 	}
2374 
2375 	return NULL;
2376 }
2377 
2378 /**
2379  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2380  *
2381  * Return: none
2382  */
2383 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2384 				const char *func_name, uint32_t line_num)
2385 {
2386 	uint32_t i;
2387 	unsigned long irq_flag;
2388 	QDF_NBUF_TRACK *p_node;
2389 	QDF_NBUF_TRACK *new_node;
2390 
2391 	new_node = qdf_nbuf_track_alloc();
2392 
2393 	i = qdf_net_buf_debug_hash(net_buf);
2394 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2395 
2396 	p_node = qdf_net_buf_debug_look_up(net_buf);
2397 
2398 	if (p_node) {
2399 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2400 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2401 			  net_buf, func_name, line_num);
2402 		qdf_nbuf_track_free(new_node);
2403 	} else {
2404 		p_node = new_node;
2405 		if (p_node) {
2406 			p_node->net_buf = net_buf;
2407 			qdf_str_lcopy(p_node->func_name, func_name,
2408 				      QDF_MEM_FUNC_NAME_SIZE);
2409 			p_node->line_num = line_num;
2410 			p_node->size = size;
2411 			qdf_mem_skb_inc(size);
2412 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2413 			gp_qdf_net_buf_track_tbl[i] = p_node;
2414 		} else
2415 			qdf_print(
2416 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2417 				  func_name, line_num, size);
2418 	}
2419 
2420 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2421 }
2422 qdf_export_symbol(qdf_net_buf_debug_add_node);
2423 
2424 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2425 				   uint32_t line_num)
2426 {
2427 	uint32_t i;
2428 	unsigned long irq_flag;
2429 	QDF_NBUF_TRACK *p_node;
2430 
2431 	i = qdf_net_buf_debug_hash(net_buf);
2432 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2433 
2434 	p_node = qdf_net_buf_debug_look_up(net_buf);
2435 
2436 	if (p_node) {
2437 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2438 			      QDF_MEM_FUNC_NAME_SIZE);
2439 		p_node->line_num = line_num;
2440 	}
2441 
2442 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2443 }
2444 
2445 qdf_export_symbol(qdf_net_buf_debug_update_node);
2446 
2447 /**
2448  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2449  *
2450  * Return: none
2451  */
2452 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2453 {
2454 	uint32_t i;
2455 	QDF_NBUF_TRACK *p_head;
2456 	QDF_NBUF_TRACK *p_node = NULL;
2457 	unsigned long irq_flag;
2458 	QDF_NBUF_TRACK *p_prev;
2459 
2460 	i = qdf_net_buf_debug_hash(net_buf);
2461 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2462 
2463 	p_head = gp_qdf_net_buf_track_tbl[i];
2464 
2465 	/* Unallocated SKB */
2466 	if (!p_head)
2467 		goto done;
2468 
2469 	p_node = p_head;
2470 	/* Found at head of the table */
2471 	if (p_head->net_buf == net_buf) {
2472 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2473 		goto done;
2474 	}
2475 
2476 	/* Search in collision list */
2477 	while (p_node) {
2478 		p_prev = p_node;
2479 		p_node = p_node->p_next;
2480 		if ((p_node) && (p_node->net_buf == net_buf)) {
2481 			p_prev->p_next = p_node->p_next;
2482 			break;
2483 		}
2484 	}
2485 
2486 done:
2487 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2488 
2489 	if (p_node) {
2490 		qdf_mem_skb_dec(p_node->size);
2491 		qdf_nbuf_track_free(p_node);
2492 	} else {
2493 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2494 			  net_buf);
2495 		QDF_BUG(0);
2496 	}
2497 }
2498 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2499 
2500 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2501 				   const char *func_name, uint32_t line_num)
2502 {
2503 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2504 
2505 	while (ext_list) {
2506 		/*
2507 		 * Take care to add if it is Jumbo packet connected using
2508 		 * frag_list
2509 		 */
2510 		qdf_nbuf_t next;
2511 
2512 		next = qdf_nbuf_queue_next(ext_list);
2513 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2514 		ext_list = next;
2515 	}
2516 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2517 }
2518 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2519 
2520 /**
2521  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2522  * @net_buf: Network buf holding head segment (single)
2523  *
2524  * WLAN driver module whose allocated SKB is freed by network stack are
2525  * suppose to call this API before returning SKB to network stack such
2526  * that the SKB is not reported as memory leak.
2527  *
2528  * Return: none
2529  */
2530 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2531 {
2532 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2533 
2534 	while (ext_list) {
2535 		/*
2536 		 * Take care to free if it is Jumbo packet connected using
2537 		 * frag_list
2538 		 */
2539 		qdf_nbuf_t next;
2540 
2541 		next = qdf_nbuf_queue_next(ext_list);
2542 
2543 		if (qdf_nbuf_get_users(ext_list) > 1) {
2544 			ext_list = next;
2545 			continue;
2546 		}
2547 
2548 		qdf_net_buf_debug_delete_node(ext_list);
2549 		ext_list = next;
2550 	}
2551 
2552 	if (qdf_nbuf_get_users(net_buf) > 1)
2553 		return;
2554 
2555 	qdf_net_buf_debug_delete_node(net_buf);
2556 }
2557 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2558 
2559 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2560 				int reserve, int align, int prio,
2561 				const char *func, uint32_t line)
2562 {
2563 	qdf_nbuf_t nbuf;
2564 
2565 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2566 
2567 	/* Store SKB in internal QDF tracking table */
2568 	if (qdf_likely(nbuf)) {
2569 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2570 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2571 	} else {
2572 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2573 	}
2574 
2575 	return nbuf;
2576 }
2577 qdf_export_symbol(qdf_nbuf_alloc_debug);
2578 
2579 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2580 {
2581 	if (qdf_unlikely(!nbuf))
2582 		return;
2583 
2584 	if (qdf_nbuf_get_users(nbuf) > 1)
2585 		goto free_buf;
2586 
2587 	/* Remove SKB from internal QDF tracking table */
2588 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2589 	qdf_net_buf_debug_delete_node(nbuf);
2590 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2591 
2592 free_buf:
2593 	__qdf_nbuf_free(nbuf);
2594 }
2595 qdf_export_symbol(qdf_nbuf_free_debug);
2596 
2597 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2598 {
2599 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2600 
2601 	if (qdf_unlikely(!cloned_buf))
2602 		return NULL;
2603 
2604 	/* Store SKB in internal QDF tracking table */
2605 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2606 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2607 
2608 	return cloned_buf;
2609 }
2610 qdf_export_symbol(qdf_nbuf_clone_debug);
2611 
2612 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2613 {
2614 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2615 
2616 	if (qdf_unlikely(!copied_buf))
2617 		return NULL;
2618 
2619 	/* Store SKB in internal QDF tracking table */
2620 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2621 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2622 
2623 	return copied_buf;
2624 }
2625 qdf_export_symbol(qdf_nbuf_copy_debug);
2626 
2627 #endif /* NBUF_MEMORY_DEBUG */
2628 
2629 #if defined(FEATURE_TSO)
2630 
2631 /**
2632  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2633  *
2634  * @ethproto: ethernet type of the msdu
2635  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2636  * @l2_len: L2 length for the msdu
2637  * @eit_hdr: pointer to EIT header
2638  * @eit_hdr_len: EIT header length for the msdu
2639  * @eit_hdr_dma_map_addr: dma addr for EIT header
2640  * @tcphdr: pointer to tcp header
2641  * @ipv4_csum_en: ipv4 checksum enable
2642  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2643  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2644  * @ip_id: IP id
2645  * @tcp_seq_num: TCP sequence number
2646  *
2647  * This structure holds the TSO common info that is common
2648  * across all the TCP segments of the jumbo packet.
2649  */
2650 struct qdf_tso_cmn_seg_info_t {
2651 	uint16_t ethproto;
2652 	uint16_t ip_tcp_hdr_len;
2653 	uint16_t l2_len;
2654 	uint8_t *eit_hdr;
2655 	uint32_t eit_hdr_len;
2656 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2657 	struct tcphdr *tcphdr;
2658 	uint16_t ipv4_csum_en;
2659 	uint16_t tcp_ipv4_csum_en;
2660 	uint16_t tcp_ipv6_csum_en;
2661 	uint16_t ip_id;
2662 	uint32_t tcp_seq_num;
2663 };
2664 
2665 /**
2666  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2667  * information
2668  * @osdev: qdf device handle
2669  * @skb: skb buffer
2670  * @tso_info: Parameters common to all segements
2671  *
2672  * Get the TSO information that is common across all the TCP
2673  * segments of the jumbo packet
2674  *
2675  * Return: 0 - success 1 - failure
2676  */
2677 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2678 			struct sk_buff *skb,
2679 			struct qdf_tso_cmn_seg_info_t *tso_info)
2680 {
2681 	/* Get ethernet type and ethernet header length */
2682 	tso_info->ethproto = vlan_get_protocol(skb);
2683 
2684 	/* Determine whether this is an IPv4 or IPv6 packet */
2685 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2686 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2687 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2688 
2689 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2690 		tso_info->ipv4_csum_en = 1;
2691 		tso_info->tcp_ipv4_csum_en = 1;
2692 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2693 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2694 				ipv4_hdr->protocol);
2695 			return 1;
2696 		}
2697 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2698 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2699 		tso_info->tcp_ipv6_csum_en = 1;
2700 	} else {
2701 		qdf_err("TSO: ethertype 0x%x is not supported!",
2702 			tso_info->ethproto);
2703 		return 1;
2704 	}
2705 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2706 	tso_info->tcphdr = tcp_hdr(skb);
2707 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2708 	/* get pointer to the ethernet + IP + TCP header and their length */
2709 	tso_info->eit_hdr = skb->data;
2710 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2711 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2712 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2713 							tso_info->eit_hdr,
2714 							tso_info->eit_hdr_len,
2715 							DMA_TO_DEVICE);
2716 	if (unlikely(dma_mapping_error(osdev->dev,
2717 				       tso_info->eit_hdr_dma_map_addr))) {
2718 		qdf_err("DMA mapping error!");
2719 		qdf_assert(0);
2720 		return 1;
2721 	}
2722 
2723 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2724 		/* inlcude IPv4 header length for IPV4 (total length) */
2725 		tso_info->ip_tcp_hdr_len =
2726 			tso_info->eit_hdr_len - tso_info->l2_len;
2727 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2728 		/* exclude IPv6 header length for IPv6 (payload length) */
2729 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2730 	}
2731 	/*
2732 	 * The length of the payload (application layer data) is added to
2733 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2734 	 * descriptor.
2735 	 */
2736 
2737 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2738 		tso_info->tcp_seq_num,
2739 		tso_info->eit_hdr_len,
2740 		tso_info->l2_len,
2741 		skb->len);
2742 	return 0;
2743 }
2744 
2745 
2746 /**
2747  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2748  *
2749  * @curr_seg: Segment whose contents are initialized
2750  * @tso_cmn_info: Parameters common to all segements
2751  *
2752  * Return: None
2753  */
2754 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2755 				struct qdf_tso_seg_elem_t *curr_seg,
2756 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2757 {
2758 	/* Initialize the flags to 0 */
2759 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2760 
2761 	/*
2762 	 * The following fields remain the same across all segments of
2763 	 * a jumbo packet
2764 	 */
2765 	curr_seg->seg.tso_flags.tso_enable = 1;
2766 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2767 		tso_cmn_info->ipv4_csum_en;
2768 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2769 		tso_cmn_info->tcp_ipv6_csum_en;
2770 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2771 		tso_cmn_info->tcp_ipv4_csum_en;
2772 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2773 
2774 	/* The following fields change for the segments */
2775 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2776 	tso_cmn_info->ip_id++;
2777 
2778 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2779 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2780 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2781 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2782 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2783 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2784 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2785 
2786 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2787 
2788 	/*
2789 	 * First fragment for each segment always contains the ethernet,
2790 	 * IP and TCP header
2791 	 */
2792 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2793 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2794 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2795 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2796 
2797 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2798 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2799 		   tso_cmn_info->eit_hdr_len,
2800 		   curr_seg->seg.tso_flags.tcp_seq_num,
2801 		   curr_seg->seg.total_len);
2802 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2803 }
2804 
2805 /**
2806  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2807  * into segments
2808  * @nbuf: network buffer to be segmented
2809  * @tso_info: This is the output. The information about the
2810  *           TSO segments will be populated within this.
2811  *
2812  * This function fragments a TCP jumbo packet into smaller
2813  * segments to be transmitted by the driver. It chains the TSO
2814  * segments created into a list.
2815  *
2816  * Return: number of TSO segments
2817  */
2818 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2819 		struct qdf_tso_info_t *tso_info)
2820 {
2821 	/* common across all segments */
2822 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2823 	/* segment specific */
2824 	void *tso_frag_vaddr;
2825 	qdf_dma_addr_t tso_frag_paddr = 0;
2826 	uint32_t num_seg = 0;
2827 	struct qdf_tso_seg_elem_t *curr_seg;
2828 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2829 	struct skb_frag_struct *frag = NULL;
2830 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2831 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2832 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2833 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2834 	int j = 0; /* skb fragment index */
2835 
2836 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2837 	total_num_seg = tso_info->tso_num_seg_list;
2838 	curr_seg = tso_info->tso_seg_list;
2839 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2840 
2841 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2842 						skb, &tso_cmn_info))) {
2843 		qdf_warn("TSO: error getting common segment info");
2844 		return 0;
2845 	}
2846 
2847 	/* length of the first chunk of data in the skb */
2848 	skb_frag_len = skb_headlen(skb);
2849 
2850 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2851 	/* update the remaining skb fragment length and TSO segment length */
2852 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2853 	skb_proc -= tso_cmn_info.eit_hdr_len;
2854 
2855 	/* get the address to the next tso fragment */
2856 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2857 	/* get the length of the next tso fragment */
2858 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2859 
2860 	if (tso_frag_len != 0) {
2861 		tso_frag_paddr = dma_map_single(osdev->dev,
2862 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2863 	}
2864 
2865 	if (unlikely(dma_mapping_error(osdev->dev,
2866 					tso_frag_paddr))) {
2867 		qdf_err("DMA mapping error!");
2868 		qdf_assert(0);
2869 		return 0;
2870 	}
2871 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2872 		__LINE__, skb_frag_len, tso_frag_len);
2873 	num_seg = tso_info->num_segs;
2874 	tso_info->num_segs = 0;
2875 	tso_info->is_tso = 1;
2876 
2877 	while (num_seg && curr_seg) {
2878 		int i = 1; /* tso fragment index */
2879 		uint8_t more_tso_frags = 1;
2880 
2881 		curr_seg->seg.num_frags = 0;
2882 		tso_info->num_segs++;
2883 		total_num_seg->num_seg.tso_cmn_num_seg++;
2884 
2885 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2886 						 &tso_cmn_info);
2887 
2888 		if (unlikely(skb_proc == 0))
2889 			return tso_info->num_segs;
2890 
2891 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2892 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2893 		/* frag len is added to ip_len in while loop below*/
2894 
2895 		curr_seg->seg.num_frags++;
2896 
2897 		while (more_tso_frags) {
2898 			if (tso_frag_len != 0) {
2899 				curr_seg->seg.tso_frags[i].vaddr =
2900 					tso_frag_vaddr;
2901 				curr_seg->seg.tso_frags[i].length =
2902 					tso_frag_len;
2903 				curr_seg->seg.total_len += tso_frag_len;
2904 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2905 				curr_seg->seg.num_frags++;
2906 				skb_proc = skb_proc - tso_frag_len;
2907 
2908 				/* increment the TCP sequence number */
2909 
2910 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2911 				curr_seg->seg.tso_frags[i].paddr =
2912 					tso_frag_paddr;
2913 			}
2914 
2915 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2916 					__func__, __LINE__,
2917 					i,
2918 					tso_frag_len,
2919 					curr_seg->seg.total_len,
2920 					curr_seg->seg.tso_frags[i].vaddr);
2921 
2922 			/* if there is no more data left in the skb */
2923 			if (!skb_proc)
2924 				return tso_info->num_segs;
2925 
2926 			/* get the next payload fragment information */
2927 			/* check if there are more fragments in this segment */
2928 			if (tso_frag_len < tso_seg_size) {
2929 				more_tso_frags = 1;
2930 				if (tso_frag_len != 0) {
2931 					tso_seg_size = tso_seg_size -
2932 						tso_frag_len;
2933 					i++;
2934 					if (curr_seg->seg.num_frags ==
2935 								FRAG_NUM_MAX) {
2936 						more_tso_frags = 0;
2937 						/*
2938 						 * reset i and the tso
2939 						 * payload size
2940 						 */
2941 						i = 1;
2942 						tso_seg_size =
2943 							skb_shinfo(skb)->
2944 								gso_size;
2945 					}
2946 				}
2947 			} else {
2948 				more_tso_frags = 0;
2949 				/* reset i and the tso payload size */
2950 				i = 1;
2951 				tso_seg_size = skb_shinfo(skb)->gso_size;
2952 			}
2953 
2954 			/* if the next fragment is contiguous */
2955 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
2956 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
2957 				skb_frag_len = skb_frag_len - tso_frag_len;
2958 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2959 
2960 			} else { /* the next fragment is not contiguous */
2961 				if (skb_shinfo(skb)->nr_frags == 0) {
2962 					qdf_info("TSO: nr_frags == 0!");
2963 					qdf_assert(0);
2964 					return 0;
2965 				}
2966 				if (j >= skb_shinfo(skb)->nr_frags) {
2967 					qdf_info("TSO: nr_frags %d j %d",
2968 						 skb_shinfo(skb)->nr_frags, j);
2969 					qdf_assert(0);
2970 					return 0;
2971 				}
2972 				frag = &skb_shinfo(skb)->frags[j];
2973 				skb_frag_len = skb_frag_size(frag);
2974 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2975 				tso_frag_vaddr = skb_frag_address_safe(frag);
2976 				j++;
2977 			}
2978 
2979 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
2980 				__func__, __LINE__, skb_frag_len, tso_frag_len,
2981 				tso_seg_size);
2982 
2983 			if (!(tso_frag_vaddr)) {
2984 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
2985 						__func__);
2986 				return 0;
2987 			}
2988 
2989 			tso_frag_paddr =
2990 					 dma_map_single(osdev->dev,
2991 						 tso_frag_vaddr,
2992 						 tso_frag_len,
2993 						 DMA_TO_DEVICE);
2994 			if (unlikely(dma_mapping_error(osdev->dev,
2995 							tso_frag_paddr))) {
2996 				qdf_err("DMA mapping error!");
2997 				qdf_assert(0);
2998 				return 0;
2999 			}
3000 		}
3001 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3002 				curr_seg->seg.tso_flags.tcp_seq_num);
3003 		num_seg--;
3004 		/* if TCP FIN flag was set, set it in the last segment */
3005 		if (!num_seg)
3006 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3007 
3008 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3009 		curr_seg = curr_seg->next;
3010 	}
3011 	return tso_info->num_segs;
3012 }
3013 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3014 
3015 /**
3016  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3017  *
3018  * @osdev: qdf device handle
3019  * @tso_seg: TSO segment element to be unmapped
3020  * @is_last_seg: whether this is last tso seg or not
3021  *
3022  * Return: none
3023  */
3024 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3025 			  struct qdf_tso_seg_elem_t *tso_seg,
3026 			  bool is_last_seg)
3027 {
3028 	uint32_t num_frags = 0;
3029 
3030 	if (tso_seg->seg.num_frags > 0)
3031 		num_frags = tso_seg->seg.num_frags - 1;
3032 
3033 	/*Num of frags in a tso seg cannot be less than 2 */
3034 	if (num_frags < 1) {
3035 		/*
3036 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3037 		 * this may happen when qdf_nbuf_get_tso_info failed,
3038 		 * do dma unmap for the 0th frag in this seg.
3039 		 */
3040 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3041 			goto last_seg_free_first_frag;
3042 
3043 		qdf_assert(0);
3044 		qdf_err("ERROR: num of frags in a tso segment is %d",
3045 			(num_frags + 1));
3046 		return;
3047 	}
3048 
3049 	while (num_frags) {
3050 		/*Do dma unmap the tso seg except the 0th frag */
3051 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3052 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3053 				num_frags);
3054 			qdf_assert(0);
3055 			return;
3056 		}
3057 		dma_unmap_single(osdev->dev,
3058 				 tso_seg->seg.tso_frags[num_frags].paddr,
3059 				 tso_seg->seg.tso_frags[num_frags].length,
3060 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3061 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3062 		num_frags--;
3063 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3064 	}
3065 
3066 last_seg_free_first_frag:
3067 	if (is_last_seg) {
3068 		/*Do dma unmap for the tso seg 0th frag */
3069 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3070 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3071 			qdf_assert(0);
3072 			return;
3073 		}
3074 		dma_unmap_single(osdev->dev,
3075 				 tso_seg->seg.tso_frags[0].paddr,
3076 				 tso_seg->seg.tso_frags[0].length,
3077 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3078 		tso_seg->seg.tso_frags[0].paddr = 0;
3079 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3080 	}
3081 }
3082 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3083 
3084 /**
3085  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3086  * into segments
3087  * @nbuf:   network buffer to be segmented
3088  * @tso_info:  This is the output. The information about the
3089  *      TSO segments will be populated within this.
3090  *
3091  * This function fragments a TCP jumbo packet into smaller
3092  * segments to be transmitted by the driver. It chains the TSO
3093  * segments created into a list.
3094  *
3095  * Return: 0 - success, 1 - failure
3096  */
3097 #ifndef BUILD_X86
3098 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3099 {
3100 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3101 	uint32_t remainder, num_segs = 0;
3102 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3103 	uint8_t frags_per_tso = 0;
3104 	uint32_t skb_frag_len = 0;
3105 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3106 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3107 	struct skb_frag_struct *frag = NULL;
3108 	int j = 0;
3109 	uint32_t temp_num_seg = 0;
3110 
3111 	/* length of the first chunk of data in the skb minus eit header*/
3112 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3113 
3114 	/* Calculate num of segs for skb's first chunk of data*/
3115 	remainder = skb_frag_len % tso_seg_size;
3116 	num_segs = skb_frag_len / tso_seg_size;
3117 	/**
3118 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3119 	 * In that case, one more tso seg is required to accommodate
3120 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3121 	 * then remaining data will be accomodated while doing the calculation
3122 	 * for nr_frags data. Hence, frags_per_tso++.
3123 	 */
3124 	if (remainder) {
3125 		if (!skb_nr_frags)
3126 			num_segs++;
3127 		else
3128 			frags_per_tso++;
3129 	}
3130 
3131 	while (skb_nr_frags) {
3132 		if (j >= skb_shinfo(skb)->nr_frags) {
3133 			qdf_info("TSO: nr_frags %d j %d",
3134 				 skb_shinfo(skb)->nr_frags, j);
3135 			qdf_assert(0);
3136 			return 0;
3137 		}
3138 		/**
3139 		 * Calculate the number of tso seg for nr_frags data:
3140 		 * Get the length of each frag in skb_frag_len, add to
3141 		 * remainder.Get the number of segments by dividing it to
3142 		 * tso_seg_size and calculate the new remainder.
3143 		 * Decrement the nr_frags value and keep
3144 		 * looping all the skb_fragments.
3145 		 */
3146 		frag = &skb_shinfo(skb)->frags[j];
3147 		skb_frag_len = skb_frag_size(frag);
3148 		temp_num_seg = num_segs;
3149 		remainder += skb_frag_len;
3150 		num_segs += remainder / tso_seg_size;
3151 		remainder = remainder % tso_seg_size;
3152 		skb_nr_frags--;
3153 		if (remainder) {
3154 			if (num_segs > temp_num_seg)
3155 				frags_per_tso = 0;
3156 			/**
3157 			 * increment the tso per frags whenever remainder is
3158 			 * positive. If frags_per_tso reaches the (max-1),
3159 			 * [First frags always have EIT header, therefore max-1]
3160 			 * increment the num_segs as no more data can be
3161 			 * accomodated in the curr tso seg. Reset the remainder
3162 			 * and frags per tso and keep looping.
3163 			 */
3164 			frags_per_tso++;
3165 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3166 				num_segs++;
3167 				frags_per_tso = 0;
3168 				remainder = 0;
3169 			}
3170 			/**
3171 			 * If this is the last skb frag and still remainder is
3172 			 * non-zero(frags_per_tso is not reached to the max-1)
3173 			 * then increment the num_segs to take care of the
3174 			 * remaining length.
3175 			 */
3176 			if (!skb_nr_frags && remainder) {
3177 				num_segs++;
3178 				frags_per_tso = 0;
3179 			}
3180 		} else {
3181 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3182 			frags_per_tso = 0;
3183 		}
3184 		j++;
3185 	}
3186 
3187 	return num_segs;
3188 }
3189 #else
3190 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3191 {
3192 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3193 	struct skb_frag_struct *frag = NULL;
3194 
3195 	/*
3196 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3197 	 * region which cannot be accessed by Target
3198 	 */
3199 	if (virt_to_phys(skb->data) < 0x50000040) {
3200 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3201 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3202 				virt_to_phys(skb->data));
3203 		goto fail;
3204 
3205 	}
3206 
3207 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3208 		frag = &skb_shinfo(skb)->frags[i];
3209 
3210 		if (!frag)
3211 			goto fail;
3212 
3213 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3214 			goto fail;
3215 	}
3216 
3217 
3218 	gso_size = skb_shinfo(skb)->gso_size;
3219 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3220 			+ tcp_hdrlen(skb));
3221 	while (tmp_len) {
3222 		num_segs++;
3223 		if (tmp_len > gso_size)
3224 			tmp_len -= gso_size;
3225 		else
3226 			break;
3227 	}
3228 
3229 	return num_segs;
3230 
3231 	/*
3232 	 * Do not free this frame, just do socket level accounting
3233 	 * so that this is not reused.
3234 	 */
3235 fail:
3236 	if (skb->sk)
3237 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3238 
3239 	return 0;
3240 }
3241 #endif
3242 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3243 
3244 #endif /* FEATURE_TSO */
3245 
3246 /**
3247  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3248  *
3249  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3250  *
3251  * Return: N/A
3252  */
3253 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3254 			  uint32_t *lo, uint32_t *hi)
3255 {
3256 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3257 		*lo = lower_32_bits(dmaaddr);
3258 		*hi = upper_32_bits(dmaaddr);
3259 	} else {
3260 		*lo = dmaaddr;
3261 		*hi = 0;
3262 	}
3263 }
3264 
3265 qdf_export_symbol(__qdf_dmaaddr_to_32s);
3266 
3267 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3268 {
3269 	qdf_nbuf_users_inc(&skb->users);
3270 	return skb;
3271 }
3272 qdf_export_symbol(__qdf_nbuf_inc_users);
3273 
3274 int __qdf_nbuf_get_users(struct sk_buff *skb)
3275 {
3276 	return qdf_nbuf_users_read(&skb->users);
3277 }
3278 qdf_export_symbol(__qdf_nbuf_get_users);
3279 
3280 /**
3281  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3282  * @skb: sk_buff handle
3283  *
3284  * Return: none
3285  */
3286 
3287 void __qdf_nbuf_ref(struct sk_buff *skb)
3288 {
3289 	skb_get(skb);
3290 }
3291 qdf_export_symbol(__qdf_nbuf_ref);
3292 
3293 /**
3294  * __qdf_nbuf_shared() - Check whether the buffer is shared
3295  *  @skb: sk_buff buffer
3296  *
3297  *  Return: true if more than one person has a reference to this buffer.
3298  */
3299 int __qdf_nbuf_shared(struct sk_buff *skb)
3300 {
3301 	return skb_shared(skb);
3302 }
3303 qdf_export_symbol(__qdf_nbuf_shared);
3304 
3305 /**
3306  * __qdf_nbuf_dmamap_create() - create a DMA map.
3307  * @osdev: qdf device handle
3308  * @dmap: dma map handle
3309  *
3310  * This can later be used to map networking buffers. They :
3311  * - need space in adf_drv's software descriptor
3312  * - are typically created during adf_drv_create
3313  * - need to be created before any API(qdf_nbuf_map) that uses them
3314  *
3315  * Return: QDF STATUS
3316  */
3317 QDF_STATUS
3318 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3319 {
3320 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3321 	/*
3322 	 * driver can tell its SG capablity, it must be handled.
3323 	 * Bounce buffers if they are there
3324 	 */
3325 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3326 	if (!(*dmap))
3327 		error = QDF_STATUS_E_NOMEM;
3328 
3329 	return error;
3330 }
3331 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3332 /**
3333  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3334  * @osdev: qdf device handle
3335  * @dmap: dma map handle
3336  *
3337  * Return: none
3338  */
3339 void
3340 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3341 {
3342 	kfree(dmap);
3343 }
3344 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3345 
3346 /**
3347  * __qdf_nbuf_map_nbytes_single() - map nbytes
3348  * @osdev: os device
3349  * @buf: buffer
3350  * @dir: direction
3351  * @nbytes: number of bytes
3352  *
3353  * Return: QDF_STATUS
3354  */
3355 #ifdef A_SIMOS_DEVHOST
3356 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3357 		qdf_device_t osdev, struct sk_buff *buf,
3358 		 qdf_dma_dir_t dir, int nbytes)
3359 {
3360 	qdf_dma_addr_t paddr;
3361 
3362 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3363 	return QDF_STATUS_SUCCESS;
3364 }
3365 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3366 #else
3367 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3368 		qdf_device_t osdev, struct sk_buff *buf,
3369 		 qdf_dma_dir_t dir, int nbytes)
3370 {
3371 	qdf_dma_addr_t paddr;
3372 
3373 	/* assume that the OS only provides a single fragment */
3374 	QDF_NBUF_CB_PADDR(buf) = paddr =
3375 		dma_map_single(osdev->dev, buf->data,
3376 			nbytes, __qdf_dma_dir_to_os(dir));
3377 	return dma_mapping_error(osdev->dev, paddr) ?
3378 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3379 }
3380 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3381 #endif
3382 /**
3383  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3384  * @osdev: os device
3385  * @buf: buffer
3386  * @dir: direction
3387  * @nbytes: number of bytes
3388  *
3389  * Return: none
3390  */
3391 #if defined(A_SIMOS_DEVHOST)
3392 void
3393 __qdf_nbuf_unmap_nbytes_single(
3394 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3395 {
3396 }
3397 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3398 
3399 #else
3400 void
3401 __qdf_nbuf_unmap_nbytes_single(
3402 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3403 {
3404 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3405 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3406 		return;
3407 	}
3408 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3409 			nbytes, __qdf_dma_dir_to_os(dir));
3410 }
3411 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3412 #endif
3413 /**
3414  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3415  * @osdev: os device
3416  * @skb: skb handle
3417  * @dir: dma direction
3418  * @nbytes: number of bytes to be mapped
3419  *
3420  * Return: QDF_STATUS
3421  */
3422 #ifdef QDF_OS_DEBUG
3423 QDF_STATUS
3424 __qdf_nbuf_map_nbytes(
3425 	qdf_device_t osdev,
3426 	struct sk_buff *skb,
3427 	qdf_dma_dir_t dir,
3428 	int nbytes)
3429 {
3430 	struct skb_shared_info  *sh = skb_shinfo(skb);
3431 
3432 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3433 
3434 	/*
3435 	 * Assume there's only a single fragment.
3436 	 * To support multiple fragments, it would be necessary to change
3437 	 * adf_nbuf_t to be a separate object that stores meta-info
3438 	 * (including the bus address for each fragment) and a pointer
3439 	 * to the underlying sk_buff.
3440 	 */
3441 	qdf_assert(sh->nr_frags == 0);
3442 
3443 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3444 }
3445 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3446 #else
3447 QDF_STATUS
3448 __qdf_nbuf_map_nbytes(
3449 	qdf_device_t osdev,
3450 	struct sk_buff *skb,
3451 	qdf_dma_dir_t dir,
3452 	int nbytes)
3453 {
3454 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3455 }
3456 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3457 #endif
3458 /**
3459  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3460  * @osdev: OS device
3461  * @skb: skb handle
3462  * @dir: direction
3463  * @nbytes: number of bytes
3464  *
3465  * Return: none
3466  */
3467 void
3468 __qdf_nbuf_unmap_nbytes(
3469 	qdf_device_t osdev,
3470 	struct sk_buff *skb,
3471 	qdf_dma_dir_t dir,
3472 	int nbytes)
3473 {
3474 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3475 
3476 	/*
3477 	 * Assume there's a single fragment.
3478 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3479 	 */
3480 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3481 }
3482 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3483 
3484 /**
3485  * __qdf_nbuf_dma_map_info() - return the dma map info
3486  * @bmap: dma map
3487  * @sg: dma map info
3488  *
3489  * Return: none
3490  */
3491 void
3492 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3493 {
3494 	qdf_assert(bmap->mapped);
3495 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3496 
3497 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3498 			sizeof(struct __qdf_segment));
3499 	sg->nsegs = bmap->nsegs;
3500 }
3501 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3502 /**
3503  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3504  *			specified by the index
3505  * @skb: sk buff
3506  * @sg: scatter/gather list of all the frags
3507  *
3508  * Return: none
3509  */
3510 #if defined(__QDF_SUPPORT_FRAG_MEM)
3511 void
3512 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3513 {
3514 	qdf_assert(skb);
3515 	sg->sg_segs[0].vaddr = skb->data;
3516 	sg->sg_segs[0].len   = skb->len;
3517 	sg->nsegs            = 1;
3518 
3519 	for (int i = 1; i <= sh->nr_frags; i++) {
3520 		skb_frag_t    *f        = &sh->frags[i - 1];
3521 
3522 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3523 			f->page_offset);
3524 		sg->sg_segs[i].len      = f->size;
3525 
3526 		qdf_assert(i < QDF_MAX_SGLIST);
3527 	}
3528 	sg->nsegs += i;
3529 
3530 }
3531 qdf_export_symbol(__qdf_nbuf_frag_info);
3532 #else
3533 #ifdef QDF_OS_DEBUG
3534 void
3535 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3536 {
3537 
3538 	struct skb_shared_info  *sh = skb_shinfo(skb);
3539 
3540 	qdf_assert(skb);
3541 	sg->sg_segs[0].vaddr = skb->data;
3542 	sg->sg_segs[0].len   = skb->len;
3543 	sg->nsegs            = 1;
3544 
3545 	qdf_assert(sh->nr_frags == 0);
3546 }
3547 qdf_export_symbol(__qdf_nbuf_frag_info);
3548 #else
3549 void
3550 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3551 {
3552 	sg->sg_segs[0].vaddr = skb->data;
3553 	sg->sg_segs[0].len   = skb->len;
3554 	sg->nsegs            = 1;
3555 }
3556 qdf_export_symbol(__qdf_nbuf_frag_info);
3557 #endif
3558 #endif
3559 /**
3560  * __qdf_nbuf_get_frag_size() - get frag size
3561  * @nbuf: sk buffer
3562  * @cur_frag: current frag
3563  *
3564  * Return: frag size
3565  */
3566 uint32_t
3567 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3568 {
3569 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3570 	const skb_frag_t *frag = sh->frags + cur_frag;
3571 
3572 	return skb_frag_size(frag);
3573 }
3574 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3575 
3576 /**
3577  * __qdf_nbuf_frag_map() - dma map frag
3578  * @osdev: os device
3579  * @nbuf: sk buff
3580  * @offset: offset
3581  * @dir: direction
3582  * @cur_frag: current fragment
3583  *
3584  * Return: QDF status
3585  */
3586 #ifdef A_SIMOS_DEVHOST
3587 QDF_STATUS __qdf_nbuf_frag_map(
3588 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3589 	int offset, qdf_dma_dir_t dir, int cur_frag)
3590 {
3591 	int32_t paddr, frag_len;
3592 
3593 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3594 	return QDF_STATUS_SUCCESS;
3595 }
3596 qdf_export_symbol(__qdf_nbuf_frag_map);
3597 #else
3598 QDF_STATUS __qdf_nbuf_frag_map(
3599 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3600 	int offset, qdf_dma_dir_t dir, int cur_frag)
3601 {
3602 	dma_addr_t paddr, frag_len;
3603 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3604 	const skb_frag_t *frag = sh->frags + cur_frag;
3605 
3606 	frag_len = skb_frag_size(frag);
3607 
3608 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3609 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3610 					__qdf_dma_dir_to_os(dir));
3611 	return dma_mapping_error(osdev->dev, paddr) ?
3612 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3613 }
3614 qdf_export_symbol(__qdf_nbuf_frag_map);
3615 #endif
3616 /**
3617  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3618  * @dmap: dma map
3619  * @cb: callback
3620  * @arg: argument
3621  *
3622  * Return: none
3623  */
3624 void
3625 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3626 {
3627 	return;
3628 }
3629 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3630 
3631 
3632 /**
3633  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3634  * @osdev: os device
3635  * @buf: sk buff
3636  * @dir: direction
3637  *
3638  * Return: none
3639  */
3640 #if defined(A_SIMOS_DEVHOST)
3641 static void __qdf_nbuf_sync_single_for_cpu(
3642 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3643 {
3644 	return;
3645 }
3646 #else
3647 static void __qdf_nbuf_sync_single_for_cpu(
3648 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3649 {
3650 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3651 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3652 		return;
3653 	}
3654 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3655 		skb_end_offset(buf) - skb_headroom(buf),
3656 		__qdf_dma_dir_to_os(dir));
3657 }
3658 #endif
3659 /**
3660  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3661  * @osdev: os device
3662  * @skb: sk buff
3663  * @dir: direction
3664  *
3665  * Return: none
3666  */
3667 void
3668 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3669 	struct sk_buff *skb, qdf_dma_dir_t dir)
3670 {
3671 	qdf_assert(
3672 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3673 
3674 	/*
3675 	 * Assume there's a single fragment.
3676 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3677 	 */
3678 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3679 }
3680 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3681 
3682 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3683 /**
3684  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3685  * @rx_status: Pointer to rx_status.
3686  * @rtap_buf: Buf to which VHT info has to be updated.
3687  * @rtap_len: Current length of radiotap buffer
3688  *
3689  * Return: Length of radiotap after VHT flags updated.
3690  */
3691 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3692 					struct mon_rx_status *rx_status,
3693 					int8_t *rtap_buf,
3694 					uint32_t rtap_len)
3695 {
3696 	uint16_t vht_flags = 0;
3697 
3698 	rtap_len = qdf_align(rtap_len, 2);
3699 
3700 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3701 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3702 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3703 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3704 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3705 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3706 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3707 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3708 	rtap_len += 2;
3709 
3710 	rtap_buf[rtap_len] |=
3711 		(rx_status->is_stbc ?
3712 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3713 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3714 		(rx_status->ldpc ?
3715 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3716 		(rx_status->beamformed ?
3717 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3718 	rtap_len += 1;
3719 	switch (rx_status->vht_flag_values2) {
3720 	case IEEE80211_RADIOTAP_VHT_BW_20:
3721 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3722 		break;
3723 	case IEEE80211_RADIOTAP_VHT_BW_40:
3724 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3725 		break;
3726 	case IEEE80211_RADIOTAP_VHT_BW_80:
3727 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3728 		break;
3729 	case IEEE80211_RADIOTAP_VHT_BW_160:
3730 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3731 		break;
3732 	}
3733 	rtap_len += 1;
3734 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3735 	rtap_len += 1;
3736 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3737 	rtap_len += 1;
3738 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3739 	rtap_len += 1;
3740 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3741 	rtap_len += 1;
3742 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3743 	rtap_len += 1;
3744 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3745 	rtap_len += 1;
3746 	put_unaligned_le16(rx_status->vht_flag_values6,
3747 			   &rtap_buf[rtap_len]);
3748 	rtap_len += 2;
3749 
3750 	return rtap_len;
3751 }
3752 
3753 /**
3754  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3755  * @rx_status: Pointer to rx_status.
3756  * @rtap_buf: buffer to which radiotap has to be updated
3757  * @rtap_len: radiotap length
3758  *
3759  * API update high-efficiency (11ax) fields in the radiotap header
3760  *
3761  * Return: length of rtap_len updated.
3762  */
3763 static unsigned int
3764 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3765 				     int8_t *rtap_buf, uint32_t rtap_len)
3766 {
3767 	/*
3768 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3769 	 * Enable all "known" HE radiotap flags for now
3770 	 */
3771 	rtap_len = qdf_align(rtap_len, 2);
3772 
3773 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3774 	rtap_len += 2;
3775 
3776 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3777 	rtap_len += 2;
3778 
3779 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3780 	rtap_len += 2;
3781 
3782 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3783 	rtap_len += 2;
3784 
3785 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3786 	rtap_len += 2;
3787 
3788 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3789 	rtap_len += 2;
3790 	qdf_debug("he data %x %x %x %x %x %x",
3791 		  rx_status->he_data1,
3792 		  rx_status->he_data2, rx_status->he_data3,
3793 		  rx_status->he_data4, rx_status->he_data5,
3794 		  rx_status->he_data6);
3795 	return rtap_len;
3796 }
3797 
3798 
3799 /**
3800  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3801  * @rx_status: Pointer to rx_status.
3802  * @rtap_buf: buffer to which radiotap has to be updated
3803  * @rtap_len: radiotap length
3804  *
3805  * API update HE-MU fields in the radiotap header
3806  *
3807  * Return: length of rtap_len updated.
3808  */
3809 static unsigned int
3810 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3811 				     int8_t *rtap_buf, uint32_t rtap_len)
3812 {
3813 	rtap_len = qdf_align(rtap_len, 2);
3814 
3815 	/*
3816 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3817 	 * Enable all "known" he-mu radiotap flags for now
3818 	 */
3819 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3820 	rtap_len += 2;
3821 
3822 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3823 	rtap_len += 2;
3824 
3825 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3826 	rtap_len += 1;
3827 
3828 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3829 	rtap_len += 1;
3830 
3831 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3832 	rtap_len += 1;
3833 
3834 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3835 	rtap_len += 1;
3836 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3837 		  rx_status->he_flags1,
3838 		  rx_status->he_flags2, rx_status->he_RU[0],
3839 		  rx_status->he_RU[1], rx_status->he_RU[2],
3840 		  rx_status->he_RU[3]);
3841 
3842 	return rtap_len;
3843 }
3844 
3845 /**
3846  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3847  * @rx_status: Pointer to rx_status.
3848  * @rtap_buf: buffer to which radiotap has to be updated
3849  * @rtap_len: radiotap length
3850  *
3851  * API update he-mu-other fields in the radiotap header
3852  *
3853  * Return: length of rtap_len updated.
3854  */
3855 static unsigned int
3856 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3857 				     int8_t *rtap_buf, uint32_t rtap_len)
3858 {
3859 	rtap_len = qdf_align(rtap_len, 2);
3860 
3861 	/*
3862 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3863 	 * Enable all "known" he-mu-other radiotap flags for now
3864 	 */
3865 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3866 	rtap_len += 2;
3867 
3868 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3869 	rtap_len += 2;
3870 
3871 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3872 	rtap_len += 1;
3873 
3874 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3875 	rtap_len += 1;
3876 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3877 		  rx_status->he_per_user_1,
3878 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3879 		  rx_status->he_per_user_known);
3880 	return rtap_len;
3881 }
3882 
3883 
3884 /**
3885  * This is the length for radiotap, combined length
3886  * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
3887  * cannot be more than available headroom_sz.
3888  * increase this when we add more radiotap elements.
3889  * Number after '+' indicates maximum possible increase due to alignment
3890  */
3891 
3892 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3893 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3894 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3895 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3896 #define RADIOTAP_FIXED_HEADER_LEN 17
3897 #define RADIOTAP_HT_FLAGS_LEN 3
3898 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3899 #define RADIOTAP_VENDOR_NS_LEN \
3900 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3901 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3902 				RADIOTAP_FIXED_HEADER_LEN + \
3903 				RADIOTAP_HT_FLAGS_LEN + \
3904 				RADIOTAP_VHT_FLAGS_LEN + \
3905 				RADIOTAP_AMPDU_STATUS_LEN + \
3906 				RADIOTAP_HE_FLAGS_LEN + \
3907 				RADIOTAP_HE_MU_FLAGS_LEN + \
3908 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
3909 				RADIOTAP_VENDOR_NS_LEN)
3910 
3911 #define IEEE80211_RADIOTAP_HE 23
3912 #define IEEE80211_RADIOTAP_HE_MU	24
3913 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3914 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
3915 
3916 /**
3917  * radiotap_num_to_freq() - Get frequency from chan number
3918  * @chan_num - Input channel number
3919  *
3920  * Return - Channel frequency in Mhz
3921  */
3922 static uint16_t radiotap_num_to_freq (uint16_t chan_num)
3923 {
3924 	if (chan_num == CHANNEL_NUM_14)
3925 		return CHANNEL_FREQ_2484;
3926 	if (chan_num < CHANNEL_NUM_14)
3927 		return CHANNEL_FREQ_2407 +
3928 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3929 
3930 	if (chan_num < CHANNEL_NUM_27)
3931 		return CHANNEL_FREQ_2512 +
3932 			((chan_num - CHANNEL_NUM_15) *
3933 			 FREQ_MULTIPLIER_CONST_20MHZ);
3934 
3935 	if (chan_num > CHANNEL_NUM_182 &&
3936 			chan_num < CHANNEL_NUM_197)
3937 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3938 			CHANNEL_FREQ_4000);
3939 
3940 	return CHANNEL_FREQ_5000 +
3941 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3942 }
3943 
3944 /**
3945  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
3946  * @rx_status: Pointer to rx_status.
3947  * @rtap_buf: Buf to which AMPDU info has to be updated.
3948  * @rtap_len: Current length of radiotap buffer
3949  *
3950  * Return: Length of radiotap after AMPDU flags updated.
3951  */
3952 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
3953 					struct mon_rx_status *rx_status,
3954 					uint8_t *rtap_buf,
3955 					uint32_t rtap_len)
3956 {
3957 	/*
3958 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
3959 	 * First 32 bits of AMPDU represents the reference number
3960 	 */
3961 
3962 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
3963 	uint16_t ampdu_flags = 0;
3964 	uint16_t ampdu_reserved_flags = 0;
3965 
3966 	rtap_len = qdf_align(rtap_len, 4);
3967 
3968 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
3969 	rtap_len += 4;
3970 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
3971 	rtap_len += 2;
3972 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
3973 	rtap_len += 2;
3974 
3975 	return rtap_len;
3976 }
3977 
3978 /**
3979  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
3980  * @rx_status: Pointer to rx_status.
3981  * @nbuf:      nbuf pointer to which radiotap has to be updated
3982  * @headroom_sz: Available headroom size.
3983  *
3984  * Return: length of rtap_len updated.
3985  */
3986 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
3987 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
3988 {
3989 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
3990 	struct ieee80211_radiotap_header *rthdr =
3991 		(struct ieee80211_radiotap_header *)rtap_buf;
3992 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
3993 	uint32_t rtap_len = rtap_hdr_len;
3994 	uint8_t length = rtap_len;
3995 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
3996 
3997 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
3998 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
3999 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4000 	rtap_len += 8;
4001 
4002 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4003 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4004 
4005 	if (rx_status->rs_fcs_err)
4006 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4007 
4008 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4009 	rtap_len += 1;
4010 
4011 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4012 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4013 	    !rx_status->he_flags) {
4014 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4015 		rtap_buf[rtap_len] = rx_status->rate;
4016 	} else
4017 		rtap_buf[rtap_len] = 0;
4018 	rtap_len += 1;
4019 
4020 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4021 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4022 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4023 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4024 	rtap_len += 2;
4025 	/* Channel flags. */
4026 	if (rx_status->chan_num > CHANNEL_NUM_35)
4027 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4028 	else
4029 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4030 	if (rx_status->cck_flag)
4031 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4032 	if (rx_status->ofdm_flag)
4033 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4034 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4035 	rtap_len += 2;
4036 
4037 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4038 	 *					(dBm)
4039 	 */
4040 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4041 	/*
4042 	 * rssi_comb is int dB, need to convert it to dBm.
4043 	 * normalize value to noise floor of -96 dBm
4044 	 */
4045 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4046 	rtap_len += 1;
4047 
4048 	/* RX signal noise floor */
4049 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4050 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4051 	rtap_len += 1;
4052 
4053 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4054 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4055 	rtap_buf[rtap_len] = rx_status->nr_ant;
4056 	rtap_len += 1;
4057 
4058 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4059 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4060 		return 0;
4061 	}
4062 
4063 	if (rx_status->ht_flags) {
4064 		length = rtap_len;
4065 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
4066 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4067 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4068 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4069 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4070 		rtap_len += 1;
4071 
4072 		if (rx_status->sgi)
4073 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4074 		if (rx_status->bw)
4075 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4076 		else
4077 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4078 		rtap_len += 1;
4079 
4080 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4081 		rtap_len += 1;
4082 
4083 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4084 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4085 			return 0;
4086 		}
4087 	}
4088 
4089 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4090 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4091 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4092 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4093 								rtap_buf,
4094 								rtap_len);
4095 	}
4096 
4097 	if (rx_status->vht_flags) {
4098 		length = rtap_len;
4099 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4100 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4101 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4102 								rtap_buf,
4103 								rtap_len);
4104 
4105 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4106 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4107 			return 0;
4108 		}
4109 	}
4110 
4111 	if (rx_status->he_flags) {
4112 		length = rtap_len;
4113 		/* IEEE80211_RADIOTAP_HE */
4114 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4115 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4116 								rtap_buf,
4117 								rtap_len);
4118 
4119 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4120 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4121 			return 0;
4122 		}
4123 	}
4124 
4125 	if (rx_status->he_mu_flags) {
4126 		length = rtap_len;
4127 		/* IEEE80211_RADIOTAP_HE-MU */
4128 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4129 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4130 								rtap_buf,
4131 								rtap_len);
4132 
4133 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4134 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4135 			return 0;
4136 		}
4137 	}
4138 
4139 	if (rx_status->he_mu_other_flags) {
4140 		length = rtap_len;
4141 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4142 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4143 		rtap_len =
4144 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4145 								rtap_buf,
4146 								rtap_len);
4147 
4148 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4149 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4150 			return 0;
4151 		}
4152 	}
4153 
4154 	rtap_len = qdf_align(rtap_len, 2);
4155 	/*
4156 	 * Radiotap Vendor Namespace
4157 	 */
4158 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4159 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4160 					(rtap_buf + rtap_len);
4161 	/*
4162 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4163 	 */
4164 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4165 	/*
4166 	 * Name space selector = 0
4167 	 * We only will have one namespace for now
4168 	 */
4169 	radiotap_vendor_ns_ath->hdr.selector = 0;
4170 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4171 					sizeof(*radiotap_vendor_ns_ath) -
4172 					sizeof(radiotap_vendor_ns_ath->hdr));
4173 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4174 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4175 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4176 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4177 				cpu_to_le32(rx_status->ppdu_timestamp);
4178 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4179 
4180 	rthdr->it_len = cpu_to_le16(rtap_len);
4181 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4182 
4183 	if (headroom_sz < rtap_len) {
4184 		qdf_err("ERROR: not enough space to update radiotap");
4185 		return 0;
4186 	}
4187 	qdf_nbuf_push_head(nbuf, rtap_len);
4188 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4189 	return rtap_len;
4190 }
4191 #else
4192 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4193 					struct mon_rx_status *rx_status,
4194 					int8_t *rtap_buf,
4195 					uint32_t rtap_len)
4196 {
4197 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4198 	return 0;
4199 }
4200 
4201 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4202 				      int8_t *rtap_buf, uint32_t rtap_len)
4203 {
4204 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4205 	return 0;
4206 }
4207 
4208 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4209 					struct mon_rx_status *rx_status,
4210 					uint8_t *rtap_buf,
4211 					uint32_t rtap_len)
4212 {
4213 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4214 	return 0;
4215 }
4216 
4217 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4218 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4219 {
4220 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4221 	return 0;
4222 }
4223 #endif
4224 qdf_export_symbol(qdf_nbuf_update_radiotap);
4225 
4226 /**
4227  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4228  * @cb_func_ptr: function pointer to the nbuf free callback
4229  *
4230  * This function registers a callback function for nbuf free.
4231  *
4232  * Return: none
4233  */
4234 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4235 {
4236 	nbuf_free_cb = cb_func_ptr;
4237 }
4238 
4239 /**
4240  * qdf_nbuf_classify_pkt() - classify packet
4241  * @skb - sk buff
4242  *
4243  * Return: none
4244  */
4245 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4246 {
4247 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4248 
4249 	/* check destination mac address is broadcast/multicast */
4250 	if (is_broadcast_ether_addr((uint8_t *)eh))
4251 		QDF_NBUF_CB_SET_BCAST(skb);
4252 	else if (is_multicast_ether_addr((uint8_t *)eh))
4253 		QDF_NBUF_CB_SET_MCAST(skb);
4254 
4255 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4256 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4257 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4258 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4259 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4260 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4261 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4262 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4263 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4264 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4265 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4266 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4267 }
4268 qdf_export_symbol(qdf_nbuf_classify_pkt);
4269 
4270 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4271 {
4272 	qdf_nbuf_users_set(&nbuf->users, 1);
4273 	nbuf->data = nbuf->head + NET_SKB_PAD;
4274 	skb_reset_tail_pointer(nbuf);
4275 }
4276 qdf_export_symbol(__qdf_nbuf_init);
4277 
4278 #ifdef WLAN_FEATURE_FASTPATH
4279 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4280 {
4281 	qdf_nbuf_users_set(&nbuf->users, 1);
4282 	nbuf->data = nbuf->head + NET_SKB_PAD;
4283 	skb_reset_tail_pointer(nbuf);
4284 }
4285 qdf_export_symbol(qdf_nbuf_init_fast);
4286 #endif /* WLAN_FEATURE_FASTPATH */
4287 
4288 
4289 #ifdef QDF_NBUF_GLOBAL_COUNT
4290 /**
4291  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
4292  *
4293  * Return void
4294  */
4295 void __qdf_nbuf_mod_init(void)
4296 {
4297 	qdf_atomic_init(&nbuf_count);
4298 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4299 }
4300 
4301 /**
4302  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
4303  *
4304  * Return void
4305  */
4306 void __qdf_nbuf_mod_exit(void)
4307 {
4308 }
4309 #endif
4310