/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf.c
 * QCA driver framework (QDF) network buffer management APIs
 */

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <qdf_atomic.h>
#include <qdf_debugfs.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_module.h>
#include <qdf_nbuf.h>
#include <qdf_status.h>
#include "qdf_str.h"
#include <qdf_trace.h>
#include "qdf_tracker.h"
#include <qdf_types.h>
#include <net/ieee80211_radiotap.h>
#include <pld_common.h>

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)

#define qdf_nbuf_users_inc atomic_inc
#define qdf_nbuf_users_dec atomic_dec
#define qdf_nbuf_users_set atomic_set
#define qdf_nbuf_users_read atomic_read
#else
#define qdf_nbuf_users_inc refcount_inc
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */
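
/*
 * Illustrative sketch (not part of the driver): these wrappers let callers
 * manage skb reference counts without caring whether the running kernel
 * implements skb->users as atomic_t (< 4.13) or refcount_t (>= 4.13):
 *
 *	qdf_nbuf_users_inc(&skb->users);
 *	...
 *	if (qdf_nbuf_users_read(&skb->users) > 1)
 *		qdf_nbuf_users_dec(&skb->users);
 */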

#define IEEE80211_RADIOTAP_VHT_BW_20	0
#define IEEE80211_RADIOTAP_VHT_BW_40	1
#define IEEE80211_RADIOTAP_VHT_BW_80	2
#define IEEE80211_RADIOTAP_VHT_BW_160	3

#define RADIOTAP_VHT_BW_20	0
#define RADIOTAP_VHT_BW_40	1
#define RADIOTAP_VHT_BW_80	4
#define RADIOTAP_VHT_BW_160	11

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
#include <qdf_mc_timer.h>

struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME      "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

/**
 * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
 *
 * Each value printed is the difference between the number of packets that
 * entered a layer and the number that left it, i.e. the packets currently
 * held at that layer.
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
			uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		break;
	}
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_update);

/**
 * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
 *
 * Return: none
 */
void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

/**
 * qdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state: layer at which the packet currently is
 *
 * This function updates the packet state to the layer at which the packet
 * currently is.
 *
 * Return: none
 */
void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt and Data packets are tracked. WMI messages
	 * such as scan commands are not tracked.
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}
	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
					current_state);
}
qdf_export_symbol(qdf_nbuf_set_state);
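
/*
 * Illustrative sketch (not part of the driver): a tx layer would tag the
 * packet with its current position before handing it to the next layer, so
 * the counters above stay consistent. For example, for a data frame about
 * to be passed from TXRX down to HTT:
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTT);
 *
 * The call is a no-op for buffers whose packet-track field is neither
 * QDF_NBUF_TX_PKT_DATA_TRACK nor QDF_NBUF_TX_PKT_MGMT_TRACK.
 */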

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
/**
 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler() - Replenish expire handler
 *
 * This function runs when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

/**
 * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

/**
 * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}
#else

static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}
#endif
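
/*
 * Illustrative lifecycle sketch (not part of the driver), assuming
 * FEATURE_NBUFF_REPLENISH_TIMER is enabled: the timer is initialized once
 * at driver load and destroyed at unload; the alloc path below starts it
 * whenever an allocation (including the pre-alloc fallback) fails, and
 * stops/clears it on the next success:
 *
 *	__qdf_nbuf_init_replenish_timer();
 *	...
 *	if (!skb)
 *		__qdf_nbuf_start_replenish_timer();
 *	else
 *		__qdf_nbuf_stop_replenish_timer();
 *	...
 *	__qdf_nbuf_deinit_replenish_timer();
 */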

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT

/**
 * __qdf_nbuf_count_get() - get nbuf global count
 *
 * Return: nbuf global count
 */
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_get);

/**
 * __qdf_nbuf_count_inc() - increment nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	int num_nbuf = 1;
	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(nbuf);

	/* Take care to account for frag_list */
	while (ext_list) {
		++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_add(num_nbuf, &nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_inc);

/**
 * __qdf_nbuf_count_dec() - decrement nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	int num_nbuf;

	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nbuf = 1;

	/* Take care to account for frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1)
			++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_sub(num_nbuf, &nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_dec);
#endif
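
/*
 * Illustrative sketch (not part of the driver): with QDF_NBUF_GLOBAL_COUNT
 * enabled, the global counter gives a cheap leak indicator at teardown,
 * after all tx/rx paths have been drained:
 *
 *	if (__qdf_nbuf_count_get())
 *		qdf_debug("nbufs still outstanding: %d",
 *			  __qdf_nbuf_count_get());
 */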

#if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
	!defined(QCA_WIFI_QCN9000)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below
	 * 0x50000040. Though we are trying to reserve low memory upfront
	 * to prevent this, we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				     size, func, line);
			return NULL;
		} else {
			/* Not freeing the skb ensures that this low-memory
			 * buffer will not get allocated again
			 */
			goto realloc;
		}
	}
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first, then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the
	 * data pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3
		 * pages. __netdev_alloc_skb has a 4k page fallback option
		 * just in case of failing high order page allocation, so
		 * we don't need to try that hard. Let kcompactd rest in
		 * peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	} else {
		__qdf_nbuf_stop_replenish_timer();
	}

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX: how about we reserve first, then align?
	 * Align & make sure that the tail & data are adjusted properly.
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE: alloc doesn't take responsibility if reserve unaligns the
	 * data pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#endif
qdf_export_symbol(__qdf_nbuf_alloc);

/**
 * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}
qdf_export_symbol(__qdf_nbuf_free);
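
/*
 * Illustrative sketch (not part of the driver): a typical caller pairs the
 * two routines above, requesting headroom for its own headers and 4-byte
 * alignment of the payload. The sizes here are made up for the example:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0, __func__, __LINE__);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	...fill and hand off, or on error...
 *	__qdf_nbuf_free(skb);
 */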

#ifdef NBUF_MEMORY_DEBUG
enum qdf_nbuf_event_type {
	QDF_NBUF_ALLOC,
	QDF_NBUF_ALLOC_CLONE,
	QDF_NBUF_ALLOC_COPY,
	QDF_NBUF_ALLOC_FAILURE,
	QDF_NBUF_FREE,
	QDF_NBUF_MAP,
	QDF_NBUF_UNMAP,
	QDF_NBUF_ALLOC_COPY_EXPAND,
};

struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
};

#define QDF_NBUF_HISTORY_SIZE 4096
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

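	/*
	 * When the incremented value reaches 'size', pull the shared index
	 * back down by 'size' so it stays bounded; next % size still maps
	 * this caller to the correct history slot.
	 */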
	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}

static void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	event->nbuf = nbuf;
	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
}
#endif /* NBUF_MEMORY_DEBUG */

#ifdef NBUF_MAP_UNMAP_DEBUG
#define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_map_tracker);
}

static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
}

static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	QDF_STATUS status;

	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);

	return QDF_STATUS_SUCCESS;
}

static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
}

QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
			      qdf_nbuf_t buf,
			      qdf_dma_dir_t dir,
			      const char *func,
			      uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_debug);

void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *func,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);

QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_single(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_single_debug);

void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
}

qdf_export_symbol(qdf_nbuf_unmap_single_debug);

QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     int nbytes,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_debug);

void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 int nbytes,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);

QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
					    qdf_nbuf_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes,
					    const char *func,
					    uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_nbuf_untrack_map(buf, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);

void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
					qdf_nbuf_t buf,
					qdf_dma_dir_t dir,
					int nbytes,
					const char *func,
					uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);

static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
					     const char *func,
					     uint32_t line)
{
	char map_func[QDF_TRACKER_FUNC_SIZE];
	uint32_t map_line;

	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
				&map_func, &map_line))
		return;

	QDF_DEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
			func, line, map_func, map_line);
}
#else
static inline void qdf_nbuf_map_tracking_init(void)
{
}

static inline void qdf_nbuf_map_tracking_deinit(void)
{
}

static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
						    const char *func,
						    uint32_t line)
{
}
#endif /* NBUF_MAP_UNMAP_DEBUG */
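
/*
 * Illustrative sketch (not part of the driver): with NBUF_MAP_UNMAP_DEBUG
 * enabled, every map is recorded and must be balanced by an unmap before
 * the buffer is freed; a leak sweep can then be run at teardown:
 *
 *	status = qdf_nbuf_map_single_debug(osdev, buf, QDF_DMA_TO_DEVICE,
 *					   __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...DMA...
 *	qdf_nbuf_unmap_single_debug(osdev, buf, QDF_DMA_TO_DEVICE,
 *				    __func__, __LINE__);
 *
 *	qdf_nbuf_map_check_for_leaks();
 */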

/**
 * __qdf_nbuf_map() - map a buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#ifdef QDF_OS_DEBUG
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	struct skb_shared_info *sh = skb_shinfo(skb);

	qdf_assert((dir == QDF_DMA_TO_DEVICE)
			|| (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's only a single fragment.
	 * To support multiple fragments, it would be necessary to change
	 * qdf_nbuf_t to be a separate object that stores meta-info
	 * (including the bus address for each fragment) and a pointer
	 * to the underlying sk_buff.
	 */
	qdf_assert(sh->nr_frags == 0);

	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);

#else
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);
#endif
/**
 * __qdf_nbuf_unmap() - to unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: dma direction
 *
 * Return: none
 */
void
__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
			qdf_dma_dir_t dir)
{
	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_unmap);

/**
 * __qdf_nbuf_map_single() - map a single buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#else
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
				skb_end_pointer(buf) - buf->data,
				__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr)
		? QDF_STATUS_E_FAILURE
		: QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
#endif
/**
 * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
				qdf_dma_dir_t dir)
{
}
#else
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
					qdf_dma_dir_t dir)
{
	if (QDF_NBUF_CB_PADDR(buf))
		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
			skb_end_pointer(buf) - buf->data,
			__qdf_dma_dir_to_os(dir));
}
#endif
qdf_export_symbol(__qdf_nbuf_unmap_single);
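
/*
 * Illustrative sketch (not part of the driver): on the PCI-style path the
 * whole linear skb buffer is mapped with dma_map_single() and the bus
 * address is cached in the cb area, so the data path can read it back
 * without remapping:
 *
 *	if (QDF_IS_STATUS_ERROR(__qdf_nbuf_map_single(osdev, buf,
 *						      QDF_DMA_FROM_DEVICE)))
 *		return QDF_STATUS_E_FAILURE;
 *	dma_addr = QDF_NBUF_CB_PADDR(buf);
 *	...program dma_addr into the hardware ring...
 *	__qdf_nbuf_unmap_single(osdev, buf, QDF_DMA_FROM_DEVICE);
 */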

/**
 * __qdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
{
	switch (cksum->l4_result) {
	case QDF_NBUF_RX_CKSUM_NONE:
		skb->ip_summed = CHECKSUM_NONE;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = cksum->val;
		break;
	default:
		pr_err("Unknown checksum type\n");
		qdf_assert(0);
		return QDF_STATUS_E_NOSUPPORT;
	}
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
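
/*
 * Illustrative sketch (not part of the driver): an rx completion handler
 * that learns from its hardware descriptor that the L4 checksum was
 * already verified would propagate that to the stack like so:
 *
 *	qdf_nbuf_rx_cksum_t cksum = {
 *		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
 *	};
 *
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */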

/**
 * __qdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
{
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		return QDF_NBUF_TX_CKSUM_NONE;
	case CHECKSUM_PARTIAL:
		return QDF_NBUF_TX_CKSUM_TCP_UDP;
	case CHECKSUM_COMPLETE:
		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
	default:
		return QDF_NBUF_TX_CKSUM_NONE;
	}
}
qdf_export_symbol(__qdf_nbuf_get_tx_cksum);

/**
 * __qdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
{
	return skb->priority;
}
qdf_export_symbol(__qdf_nbuf_get_tid);

/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value
 *
 * Return: none
 */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
	skb->priority = tid;
}
qdf_export_symbol(__qdf_nbuf_set_tid);

/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
}
qdf_export_symbol(__qdf_nbuf_get_exemption_type);

/**
 * __qdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
{
	qdf_trace_update_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_trace_cb);

/**
 * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype of a DHCP packet
 * @data: Pointer to DHCP packet data buffer
 *
 * This function returns the subtype of the DHCP packet.
 *
 * Return: subtype of the DHCP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
					QDF_DHCP_OPTION53_LENGTH)) {

		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
		case QDF_DHCP_DISCOVER:
			subtype = QDF_PROTO_DHCP_DISCOVER;
			break;
		case QDF_DHCP_REQUEST:
			subtype = QDF_PROTO_DHCP_REQUEST;
			break;
		case QDF_DHCP_OFFER:
			subtype = QDF_PROTO_DHCP_OFFER;
			break;
		case QDF_DHCP_ACK:
			subtype = QDF_PROTO_DHCP_ACK;
			break;
		case QDF_DHCP_NAK:
			subtype = QDF_PROTO_DHCP_NACK;
			break;
		case QDF_DHCP_RELEASE:
			subtype = QDF_PROTO_DHCP_RELEASE;
			break;
		case QDF_DHCP_INFORM:
			subtype = QDF_PROTO_DHCP_INFORM;
			break;
		case QDF_DHCP_DECLINE:
			subtype = QDF_PROTO_DHCP_DECLINE;
			break;
		default:
			break;
		}
	}

	return subtype;
}
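
/*
 * Illustrative sketch (not part of the driver): a logging path would first
 * classify the frame, then extract the DHCP subtype from the same data
 * pointer (see __qdf_nbuf_data_is_ipv4_dhcp_pkt() later in this file):
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data) &&
 *	    __qdf_nbuf_data_get_dhcp_subtype(data) == QDF_PROTO_DHCP_DISCOVER)
 *		qdf_debug("DHCP DISCOVER seen");
 */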

/**
 * __qdf_nbuf_data_get_eapol_subtype() - get the subtype of an EAPOL packet
 * @data: Pointer to EAPOL packet data buffer
 *
 * This function returns the subtype of the EAPOL packet.
 *
 * Return: subtype of the EAPOL packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
{
	uint16_t eapol_key_info;
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	uint16_t mask;

	eapol_key_info = (uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_INFO_OFFSET));

	mask = eapol_key_info & EAPOL_MASK;
	switch (mask) {
	case EAPOL_M1_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M1;
		break;
	case EAPOL_M2_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M2;
		break;
	case EAPOL_M3_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M3;
		break;
	case EAPOL_M4_BIT_MASK:
		subtype = QDF_PROTO_EAPOL_M4;
		break;
	default:
		break;
	}

	return subtype;
}

/**
 * __qdf_nbuf_data_get_arp_subtype() - get the subtype of an ARP packet
 * @data: Pointer to ARP packet data buffer
 *
 * This function returns the subtype of the ARP packet.
 *
 * Return: subtype of the ARP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_arp_subtype(uint8_t *data)
{
	uint16_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint16_t)(*(uint16_t *)
			(data + ARP_SUB_TYPE_OFFSET));

	switch (QDF_SWAP_U16(subtype)) {
	case ARP_REQUEST:
		proto_subtype = QDF_PROTO_ARP_REQ;
		break;
	case ARP_RESPONSE:
		proto_subtype = QDF_PROTO_ARP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmp_subtype() - get the subtype of an IPv4 ICMP packet
 * @data: Pointer to IPv4 ICMP packet data buffer
 *
 * This function returns the subtype of the ICMP packet.
 *
 * Return: subtype of the ICMP packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMP_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMP_REQUEST:
		proto_subtype = QDF_PROTO_ICMP_REQ;
		break;
	case ICMP_RESPONSE:
		proto_subtype = QDF_PROTO_ICMP_RES;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype of an IPv6 ICMPv6
 *					  packet
 * @data: Pointer to IPv6 ICMPv6 packet data buffer
 *
 * This function returns the subtype of the ICMPv6 packet.
 *
 * Return: subtype of the ICMPv6 packet.
 */
enum qdf_proto_subtype
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
{
	uint8_t subtype;
	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;

	subtype = (uint8_t)(*(uint8_t *)
			(data + ICMPV6_SUBTYPE_OFFSET));

	switch (subtype) {
	case ICMPV6_REQUEST:
		proto_subtype = QDF_PROTO_ICMPV6_REQ;
		break;
	case ICMPV6_RESPONSE:
		proto_subtype = QDF_PROTO_ICMPV6_RES;
		break;
	case ICMPV6_RS:
		proto_subtype = QDF_PROTO_ICMPV6_RS;
		break;
	case ICMPV6_RA:
		proto_subtype = QDF_PROTO_ICMPV6_RA;
		break;
	case ICMPV6_NS:
		proto_subtype = QDF_PROTO_ICMPV6_NS;
		break;
	case ICMPV6_NA:
		proto_subtype = QDF_PROTO_ICMPV6_NA;
		break;
	default:
		break;
	}

	return proto_subtype;
}

/**
 * __qdf_nbuf_data_get_ipv4_proto() - get the proto type of an IPv4 packet
 * @data: Pointer to IPv4 packet data buffer
 *
 * This function returns the proto type of the IPv4 packet.
 *
 * Return: proto type of IPv4 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_get_ipv6_proto() - get the proto type of an IPv6 packet
 * @data: Pointer to IPv6 packet data buffer
 *
 * This function returns the proto type of the IPv6 packet.
 *
 * Return: proto type of IPv6 packet.
 */
uint8_t
__qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
{
	uint8_t proto_type;

	proto_type = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
	return proto_type;
}

/**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an IPv4 packet
 * @data: Pointer to network data
 *
 * This API is for Tx packets.
 *
 * Return: true if packet is ipv4 packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a DHCP packet
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
					 sizeof(uint16_t)));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an EAPOL packet
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is EAPOL packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);

/**
 * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a WAPI packet
 * @skb: Pointer to network buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is WAPI packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);

/**
 * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a TDLS packet
 * @skb: Pointer to network buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is TDLS packet
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(skb->data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an ARP packet
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is ARP packet
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);

/**
 * __qdf_nbuf_data_is_arp_req() - check if skb data is an ARP request
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is ARP request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an ARP response
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: true if packet is ARP response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
{
	uint16_t op_code;

	op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));

	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_arp_src_ip() - get ARP source IP
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: ARP packet source IP value.
 */
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_arp_tgt_ip() - get ARP target IP
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: ARP packet target IP value.
 */
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_get_dns_domain_name() - get DNS domain name
 * @data: Pointer to network data buffer
 * @len: length to copy
 *
 * This API returns the DNS domain name.
 *
 * Return: DNS domain name.
 */
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
{
	uint8_t *domain_name;

	domain_name = (uint8_t *)
			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
	return domain_name;
}

/**
 * __qdf_nbuf_data_is_dns_query() - check if skb data is a DNS query
 * @data: Pointer to network data buffer
 *
 * This API is for DNS query packets.
 *
 * Return: true if packet is DNS query packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
{
	uint16_t op_code;
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
	/* A standard DNS query always happens on Dest Port 53. */
	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_dns_response() - check if skb data is a DNS response
 * @data: Pointer to network data buffer
 *
 * This API is for DNS query responses.
 *
 * Return: true if packet is DNS response packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
{
	uint16_t op_code;
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
	/* A standard DNS response always comes on Src Port 53. */
	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
		op_code = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));

		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
			return true;
	}
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a TCP SYN
 * @data: Pointer to network data buffer
 *
 * This API is for TCP SYN packets.
 *
 * Return: true if packet is tcp syn packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a TCP SYN ACK
 * @data: Pointer to network data buffer
 *
 * This API is for TCP SYN ACK packets.
 *
 * Return: true if packet is tcp syn ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a TCP ACK
 * @data: Pointer to network data buffer
 *
 * This API is for TCP ACK packets.
 *
 * Return: true if packet is tcp ack packet.
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_get_tcp_src_port() - get TCP source port
 * @data: Pointer to network data buffer
 *
 * This API is for TCP packets.
 *
 * Return: tcp source port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
{
	uint16_t src_port;

	src_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));

	return src_port;
}

/**
 * __qdf_nbuf_data_get_tcp_dst_port() - get TCP destination port
 * @data: Pointer to network data buffer
 *
 * This API is for TCP packets.
 *
 * Return: tcp destination port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
{
	uint16_t tgt_port;

	tgt_port = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));

	return tgt_port;
}

/**
 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an ICMPv4 request
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv4 request packets.
 *
 * Return: true if packet is icmpv4 request
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
		return true;
	return false;
}

/**
 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an ICMPv4 response
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv4 response packets.
 *
 * Return: true if packet is icmpv4 response
 *	   false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
{
	uint8_t op_code;

	op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));

	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
		return true;
	return false;
}

/**
 * __qdf_nbuf_get_icmpv4_src_ip() - get ICMPv4 source IP
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: icmpv4 packet source IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
{
	uint32_t src_ip;

	src_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));

	return src_ip;
}

/**
 * __qdf_nbuf_get_icmpv4_tgt_ip() - get ICMPv4 target IP
 * @data: Pointer to network data buffer
 *
 * This API is for IPv4 packets.
 *
 * Return: icmpv4 packet target IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
{
	uint32_t tgt_ip;

	tgt_ip = (uint32_t)(*(uint32_t *)(data +
				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));

	return tgt_ip;
}

/**
 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPv6 packet
 * @data: Pointer to IPv6 packet data buffer
 *
 * This function checks whether it is an IPv6 packet or not.
 *
 * Return: TRUE if it is an IPv6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = (uint16_t)(*(uint16_t *)(data +
				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);

/**
 * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a DHCP packet
 * @data: Pointer to network data buffer
 *
 * This API is for IPv6 packets.
 *
 * Return: true if packet is DHCP packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
					sizeof(uint16_t));

	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
		return true;
	else
		return false;
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);

/**
 * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is an MDNS packet
 * @data: Pointer to network data buffer
 *
 * This API is for IPv6 packets.
 *
 * Return: true if packet is MDNS packet
 *	   false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
{
	uint16_t sport;
	uint16_t dport;

	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
					sizeof(uint16_t));

	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
	    dport == sport)
		return true;
	else
		return false;
}

qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);

/**
 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPv4 multicast
 *					 packet
 * @data: Pointer to IPv4 packet data buffer
 *
 * This function checks whether it is an IPv4 multicast packet or not.
 *
 * Return: TRUE if it is an IPv4 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
		uint32_t *dst_addr =
		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);

		/*
		 * Check the first nibble of the IPv4 destination address;
		 * if it is equal to 0xE, the address is multicast
		 * (224.0.0.0/4).
		 */
		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
			return true;
		else
			return false;
	} else
		return false;
}
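
/*
 * Worked example (illustrative): for destination 239.255.255.250 (SSDP),
 * the first byte is 0xEF; masking out everything but the leading nibble
 * yields 0xE0, i.e. the nibble 0xE, so the check above classifies it as
 * multicast. This assumes the usual 224.0.0.0/4 mask values behind the
 * QDF_NBUF_TRAC_IPV4_ADDR_* macros.
 */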
1850 
1851 /**
1852  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1853  * @data: Pointer to IPV6 packet data buffer
1854  *
1855  * This func. checks whether it is a IPV6 multicast packet or not.
1856  *
1857  * Return: TRUE if it is a IPV6 multicast packet
1858  *         FALSE if not
1859  */
1860 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1861 {
1862 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1863 		uint16_t *dst_addr;
1864 
1865 		dst_addr = (uint16_t *)
1866 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1867 
1868 		/*
1869 		 * Check first byte of the IP address and if it
1870 		 * 0xFF00 then it is a IPV6 mcast packet.
1871 		 */
1872 		if (*dst_addr ==
1873 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1874 			return true;
1875 		else
1876 			return false;
1877 	} else
1878 		return false;
1879 }
1880 
1881 /**
1882  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1883  * @data: Pointer to IPV4 ICMP packet data buffer
1884  *
1885  * This func. checks whether it is a ICMP packet or not.
1886  *
1887  * Return: TRUE if it is a ICMP packet
1888  *         FALSE if not
1889  */
1890 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1891 {
1892 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1893 		uint8_t pkt_type;
1894 
1895 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1896 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1897 
1898 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1899 			return true;
1900 		else
1901 			return false;
1902 	} else
1903 		return false;
1904 }
1905 
1906 /**
1907  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1908  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1909  *
1910  * This func. checks whether it is a ICMPV6 packet or not.
1911  *
1912  * Return: TRUE if it is a ICMPV6 packet
1913  *         FALSE if not
1914  */
1915 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1916 {
1917 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1918 		uint8_t pkt_type;
1919 
1920 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1921 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1922 
1923 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1924 			return true;
1925 		else
1926 			return false;
1927 	} else
1928 		return false;
1929 }
1930 
1931 /**
1932  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1933  * @data: Pointer to IPV4 UDP packet data buffer
1934  *
1935  * This func. checks whether it is a IPV4 UDP packet or not.
1936  *
1937  * Return: TRUE if it is a IPV4 UDP packet
1938  *         FALSE if not
1939  */
1940 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1941 {
1942 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1943 		uint8_t pkt_type;
1944 
1945 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1946 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1947 
1948 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1949 			return true;
1950 		else
1951 			return false;
1952 	} else
1953 		return false;
1954 }
1955 
1956 /**
1957  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1958  * @data: Pointer to IPV4 TCP packet data buffer
1959  *
1960  * This func. checks whether it is a IPV4 TCP packet or not.
1961  *
1962  * Return: TRUE if it is a IPV4 TCP packet
1963  *         FALSE if not
1964  */
1965 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1966 {
1967 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1968 		uint8_t pkt_type;
1969 
1970 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1971 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1972 
1973 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1974 			return true;
1975 		else
1976 			return false;
1977 	} else
1978 		return false;
1979 }
1980 
1981 /**
1982  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
1983  * @data: Pointer to IPV6 UDP packet data buffer
1984  *
1985  * This func. checks whether it is a IPV6 UDP packet or not.
1986  *
1987  * Return: TRUE if it is a IPV6 UDP packet
1988  *         FALSE if not
1989  */
1990 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
1991 {
1992 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1993 		uint8_t pkt_type;
1994 
1995 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1996 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1997 
1998 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1999 			return true;
2000 		else
2001 			return false;
2002 	} else
2003 		return false;
2004 }
2005 
2006 /**
 * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPV6 TCP packet.
 * @data: Pointer to IPV6 TCP packet data buffer
 *
 * This function checks whether it is an IPV6 TCP packet.
 *
 * Return: TRUE if it is an IPV6 TCP packet
 *         FALSE if not
2014  */
2015 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2016 {
2017 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2018 		uint8_t pkt_type;
2019 
2020 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2021 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2022 
2023 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2024 			return true;
2025 		else
2026 			return false;
2027 	} else
2028 		return false;
2029 }
2030 
2031 /**
2032  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
 * @nbuf: sk buff
2034  *
2035  * Return: true if packet is broadcast
2036  *	   false otherwise
2037  */
2038 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2039 {
2040 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2041 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2042 }
2043 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
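
/*
 * Illustrative sketch (not part of the driver): how a hypothetical rx
 * stats handler might combine the parsing helpers above; 'stats' is an
 * assumed counter structure, not a QDF type.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_is_bcast_pkt(nbuf))
 *		stats->rx_bcast++;
 *	else if (__qdf_nbuf_data_is_icmp_pkt(data))
 *		stats->rx_icmp++;
 *	else if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data))
 *		stats->rx_ipv4_tcp++;
 */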
2044 
2045 #ifdef NBUF_MEMORY_DEBUG
2046 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2047 
2048 /**
2049  * struct qdf_nbuf_track_t - Network buffer track structure
2050  *
2051  * @p_next: Pointer to next
2052  * @net_buf: Pointer to network buffer
2053  * @func_name: Function name
2054  * @line_num: Line number
2055  * @size: Size
2056  */
2057 struct qdf_nbuf_track_t {
2058 	struct qdf_nbuf_track_t *p_next;
2059 	qdf_nbuf_t net_buf;
2060 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2061 	uint32_t line_num;
2062 	size_t size;
2063 };
2064 
2065 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2066 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2067 
2068 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2069 static struct kmem_cache *nbuf_tracking_cache;
2070 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2071 static spinlock_t qdf_net_buf_track_free_list_lock;
2072 static uint32_t qdf_net_buf_track_free_list_count;
2073 static uint32_t qdf_net_buf_track_used_list_count;
2074 static uint32_t qdf_net_buf_track_max_used;
2075 static uint32_t qdf_net_buf_track_max_free;
2076 static uint32_t qdf_net_buf_track_max_allocated;
2077 
2078 /**
2079  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2080  *
 * Tracks the maximum number of network buffers that the wlan driver was
 * tracking at any one time, as well as the maximum total number of tracking
 * cookies allocated.
2083  *
2084  * Return: none
2085  */
2086 static inline void update_max_used(void)
2087 {
2088 	int sum;
2089 
2090 	if (qdf_net_buf_track_max_used <
2091 	    qdf_net_buf_track_used_list_count)
2092 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2093 	sum = qdf_net_buf_track_free_list_count +
2094 		qdf_net_buf_track_used_list_count;
2095 	if (qdf_net_buf_track_max_allocated < sum)
2096 		qdf_net_buf_track_max_allocated = sum;
2097 }
2098 
2099 /**
 * update_max_free() - update qdf_net_buf_track_max_free
 *
 * Tracks the maximum number of tracking buffers kept in the freelist.
2103  *
2104  * Return: none
2105  */
2106 static inline void update_max_free(void)
2107 {
2108 	if (qdf_net_buf_track_max_free <
2109 	    qdf_net_buf_track_free_list_count)
2110 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2111 }
2112 
2113 /**
2114  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2115  *
2116  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
 * This function also adds flexibility to adjust the allocation and freelist
 * schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be
 *         zeroed.
2121  */
2122 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2123 {
2124 	int flags = GFP_KERNEL;
2125 	unsigned long irq_flag;
2126 	QDF_NBUF_TRACK *new_node = NULL;
2127 
2128 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2129 	qdf_net_buf_track_used_list_count++;
2130 	if (qdf_net_buf_track_free_list) {
2131 		new_node = qdf_net_buf_track_free_list;
2132 		qdf_net_buf_track_free_list =
2133 			qdf_net_buf_track_free_list->p_next;
2134 		qdf_net_buf_track_free_list_count--;
2135 	}
2136 	update_max_used();
2137 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2138 
2139 	if (new_node)
2140 		return new_node;
2141 
2142 	if (in_interrupt() || irqs_disabled() || in_atomic())
2143 		flags = GFP_ATOMIC;
2144 
2145 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2146 }
2147 
2148 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2149 #define FREEQ_POOLSIZE 2048
2150 
2151 /**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: nbuf tracking node to be freed
 *
 * Matches calls to qdf_nbuf_track_alloc.
 * Either frees the tracking cookie to the kernel or to an internal
 * freelist, based on the size of the freelist.
2157  *
2158  * Return: none
2159  */
2160 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2161 {
2162 	unsigned long irq_flag;
2163 
2164 	if (!node)
2165 		return;
2166 
	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
	 * only shrink the freelist if it is bigger than twice the number of
	 * nbufs in use. If the driver is stalling in a consistent bursty
	 * fashion, this will keep 3/4 of the allocations from the free list
	 * while also allowing the system to recover memory as less frantic
	 * traffic occurs.
	 */
2174 
2175 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2176 
2177 	qdf_net_buf_track_used_list_count--;
2178 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2179 	   (qdf_net_buf_track_free_list_count >
2180 	    qdf_net_buf_track_used_list_count << 1)) {
2181 		kmem_cache_free(nbuf_tracking_cache, node);
2182 	} else {
2183 		node->p_next = qdf_net_buf_track_free_list;
2184 		qdf_net_buf_track_free_list = node;
2185 		qdf_net_buf_track_free_list_count++;
2186 	}
2187 	update_max_free();
2188 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2189 }
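
/*
 * Worked example of the shrink heuristic above (illustrative numbers):
 * with FREEQ_POOLSIZE = 2048 and 1000 cookies currently in use, a freed
 * node is returned to the kernel only once the freelist already holds
 * more than max(2048, 2 * 1000) = 2048 nodes; with 5000 cookies in use
 * the freelist may grow to 10000 nodes before shrinking begins.
 */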
2190 
2191 /**
2192  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2193  *
2194  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2195  * the freelist first makes it performant for the first iperf udp burst
2196  * as well as steady state.
2197  *
2198  * Return: None
2199  */
2200 static void qdf_nbuf_track_prefill(void)
2201 {
2202 	int i;
2203 	QDF_NBUF_TRACK *node, *head;
2204 
2205 	/* prepopulate the freelist */
2206 	head = NULL;
2207 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2208 		node = qdf_nbuf_track_alloc();
2209 		if (!node)
2210 			continue;
2211 		node->p_next = head;
2212 		head = node;
2213 	}
2214 	while (head) {
2215 		node = head->p_next;
2216 		qdf_nbuf_track_free(head);
2217 		head = node;
2218 	}
2219 
2220 	/* prefilled buffers should not count as used */
2221 	qdf_net_buf_track_max_used = 0;
2222 }
2223 
2224 /**
 * qdf_nbuf_track_memory_manager_create() - create nbuf tracking cookie manager
2226  *
2227  * This initializes the memory manager for the nbuf tracking cookies.  Because
2228  * these cookies are all the same size and only used in this feature, we can
2229  * use a kmem_cache to provide tracking as well as to speed up allocations.
2230  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2231  * features) a freelist is prepopulated here.
2232  *
2233  * Return: None
2234  */
2235 static void qdf_nbuf_track_memory_manager_create(void)
2236 {
2237 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2238 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2239 						sizeof(QDF_NBUF_TRACK),
2240 						0, 0, NULL);
2241 
2242 	qdf_nbuf_track_prefill();
2243 }
2244 
2245 /**
 * qdf_nbuf_track_memory_manager_destroy() - destroy nbuf tracking cookie manager
2247  *
2248  * Empty the freelist and print out usage statistics when it is no longer
2249  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2250  * any nbuf tracking cookies were leaked.
2251  *
2252  * Return: None
2253  */
2254 static void qdf_nbuf_track_memory_manager_destroy(void)
2255 {
2256 	QDF_NBUF_TRACK *node, *tmp;
2257 	unsigned long irq_flag;
2258 
2259 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2260 	node = qdf_net_buf_track_free_list;
2261 
2262 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2263 		qdf_print("%s: unexpectedly large max_used count %d",
2264 			  __func__, qdf_net_buf_track_max_used);
2265 
2266 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2267 		qdf_print("%s: %d unused trackers were allocated",
2268 			  __func__,
2269 			  qdf_net_buf_track_max_allocated -
2270 			  qdf_net_buf_track_max_used);
2271 
2272 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2273 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2274 		qdf_print("%s: check freelist shrinking functionality",
2275 			  __func__);
2276 
2277 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2278 		  "%s: %d residual freelist size",
2279 		  __func__, qdf_net_buf_track_free_list_count);
2280 
2281 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2282 		  "%s: %d max freelist size observed",
2283 		  __func__, qdf_net_buf_track_max_free);
2284 
2285 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2286 		  "%s: %d max buffers used observed",
2287 		  __func__, qdf_net_buf_track_max_used);
2288 
2289 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2290 		  "%s: %d max buffers allocated observed",
2291 		  __func__, qdf_net_buf_track_max_allocated);
2292 
2293 	while (node) {
2294 		tmp = node;
2295 		node = node->p_next;
2296 		kmem_cache_free(nbuf_tracking_cache, tmp);
2297 		qdf_net_buf_track_free_list_count--;
2298 	}
2299 
2300 	if (qdf_net_buf_track_free_list_count != 0)
2301 		qdf_info("%d unfreed tracking memory lost in freelist",
2302 			 qdf_net_buf_track_free_list_count);
2303 
2304 	if (qdf_net_buf_track_used_list_count != 0)
2305 		qdf_info("%d unfreed tracking memory still in use",
2306 			 qdf_net_buf_track_used_list_count);
2307 
2308 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2309 	kmem_cache_destroy(nbuf_tracking_cache);
2310 	qdf_net_buf_track_free_list = NULL;
2311 }
2312 
2313 /**
2314  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2315  *
 * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
 * driver in a hash table, and when the driver is unloaded it reports leaked
 * SKBs.
 * WLAN driver modules whose allocated SKBs are freed by the network stack
 * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
 * not reported as a memory leak.
2321  *
2322  * Return: none
2323  */
2324 void qdf_net_buf_debug_init(void)
2325 {
2326 	uint32_t i;
2327 
2328 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2329 
2330 	qdf_nbuf_map_tracking_init();
2331 	qdf_nbuf_track_memory_manager_create();
2332 
2333 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2334 		gp_qdf_net_buf_track_tbl[i] = NULL;
2335 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2336 	}
2337 }
2338 qdf_export_symbol(qdf_net_buf_debug_init);
2339 
2340 /**
 * qdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Exit network buffer tracking debug functionality and log SKB memory leaks.
 * As part of exiting the functionality, free the leaked memory and
2345  * cleanup the tracking buffers.
2346  *
2347  * Return: none
2348  */
2349 void qdf_net_buf_debug_exit(void)
2350 {
2351 	uint32_t i;
2352 	uint32_t count = 0;
2353 	unsigned long irq_flag;
2354 	QDF_NBUF_TRACK *p_node;
2355 	QDF_NBUF_TRACK *p_prev;
2356 
2357 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2358 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2359 		p_node = gp_qdf_net_buf_track_tbl[i];
2360 		while (p_node) {
2361 			p_prev = p_node;
2362 			p_node = p_node->p_next;
2363 			count++;
2364 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2365 				 p_prev->func_name, p_prev->line_num,
2366 				 p_prev->size, p_prev->net_buf);
2367 			qdf_nbuf_track_free(p_prev);
2368 		}
2369 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2370 	}
2371 
2372 	qdf_nbuf_track_memory_manager_destroy();
2373 	qdf_nbuf_map_tracking_deinit();
2374 
2375 #ifdef CONFIG_HALT_KMEMLEAK
2376 	if (count) {
		qdf_err("%d SKBs leaked; please fix the SKB leak", count);
2378 		QDF_BUG(0);
2379 	}
2380 #endif
2381 }
2382 qdf_export_symbol(qdf_net_buf_debug_exit);
2383 
2384 /**
 * qdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer to be hashed
 *
2387  * Return: hash value
2388  */
2389 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2390 {
2391 	uint32_t i;
2392 
2393 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2394 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2395 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2396 
2397 	return i;
2398 }
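
/*
 * Worked example (hypothetical pointer value): for net_buf = 0x12345678,
 * (ptr >> 4) = 0x01234567 and (ptr >> 14) = 0x48d1; their sum, masked
 * with (QDF_NET_BUF_TRACK_MAX_SIZE - 1) = 0x3ff, selects bucket 0x238.
 * The two shifts mix in bits above the allocator's alignment so equally
 * aligned buffers do not all collide in one bucket.
 */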
2399 
2400 /**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer to look up
 *
 * Return: pointer to the tracking node if the skb is found in the hash
 *	   table, else %NULL
2405  */
2406 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2407 {
2408 	uint32_t i;
2409 	QDF_NBUF_TRACK *p_node;
2410 
2411 	i = qdf_net_buf_debug_hash(net_buf);
2412 	p_node = gp_qdf_net_buf_track_tbl[i];
2413 
2414 	while (p_node) {
2415 		if (p_node->net_buf == net_buf)
2416 			return p_node;
2417 		p_node = p_node->p_next;
2418 	}
2419 
2420 	return NULL;
2421 }
2422 
2423 /**
 * qdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer to be tracked
 * @size: size of the network buffer
 * @func_name: name of the function that allocated the buffer
 * @line_num: line number of the allocating call site
 *
2426  * Return: none
2427  */
2428 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2429 				const char *func_name, uint32_t line_num)
2430 {
2431 	uint32_t i;
2432 	unsigned long irq_flag;
2433 	QDF_NBUF_TRACK *p_node;
2434 	QDF_NBUF_TRACK *new_node;
2435 
2436 	new_node = qdf_nbuf_track_alloc();
2437 
2438 	i = qdf_net_buf_debug_hash(net_buf);
2439 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2440 
2441 	p_node = qdf_net_buf_debug_look_up(net_buf);
2442 
2443 	if (p_node) {
2444 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2445 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2446 			  net_buf, func_name, line_num);
2447 		qdf_nbuf_track_free(new_node);
2448 	} else {
2449 		p_node = new_node;
2450 		if (p_node) {
2451 			p_node->net_buf = net_buf;
2452 			qdf_str_lcopy(p_node->func_name, func_name,
2453 				      QDF_MEM_FUNC_NAME_SIZE);
2454 			p_node->line_num = line_num;
2455 			p_node->size = size;
2456 			qdf_mem_skb_inc(size);
2457 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2458 			gp_qdf_net_buf_track_tbl[i] = p_node;
2459 		} else
2460 			qdf_print(
2461 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2462 				  func_name, line_num, size);
2463 	}
2464 
2465 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2466 }
2467 qdf_export_symbol(qdf_net_buf_debug_add_node);
2468 
2469 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2470 				   uint32_t line_num)
2471 {
2472 	uint32_t i;
2473 	unsigned long irq_flag;
2474 	QDF_NBUF_TRACK *p_node;
2475 
2476 	i = qdf_net_buf_debug_hash(net_buf);
2477 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2478 
2479 	p_node = qdf_net_buf_debug_look_up(net_buf);
2480 
2481 	if (p_node) {
2482 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2483 			      QDF_MEM_FUNC_NAME_SIZE);
2484 		p_node->line_num = line_num;
2485 	}
2486 
2487 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2488 }
2489 
2490 qdf_export_symbol(qdf_net_buf_debug_update_node);
2491 
2492 /**
 * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer to stop tracking
 *
2495  * Return: none
2496  */
2497 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2498 {
2499 	uint32_t i;
2500 	QDF_NBUF_TRACK *p_head;
2501 	QDF_NBUF_TRACK *p_node = NULL;
2502 	unsigned long irq_flag;
2503 	QDF_NBUF_TRACK *p_prev;
2504 
2505 	i = qdf_net_buf_debug_hash(net_buf);
2506 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2507 
2508 	p_head = gp_qdf_net_buf_track_tbl[i];
2509 
2510 	/* Unallocated SKB */
2511 	if (!p_head)
2512 		goto done;
2513 
2514 	p_node = p_head;
2515 	/* Found at head of the table */
2516 	if (p_head->net_buf == net_buf) {
2517 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2518 		goto done;
2519 	}
2520 
2521 	/* Search in collision list */
2522 	while (p_node) {
2523 		p_prev = p_node;
2524 		p_node = p_node->p_next;
2525 		if ((p_node) && (p_node->net_buf == net_buf)) {
2526 			p_prev->p_next = p_node->p_next;
2527 			break;
2528 		}
2529 	}
2530 
2531 done:
2532 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2533 
2534 	if (p_node) {
2535 		qdf_mem_skb_dec(p_node->size);
2536 		qdf_nbuf_track_free(p_node);
2537 	} else {
2538 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2539 			  net_buf);
2540 		QDF_BUG(0);
2541 	}
2542 }
2543 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2544 
2545 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2546 				   const char *func_name, uint32_t line_num)
2547 {
2548 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2549 
2550 	while (ext_list) {
2551 		/*
		 * Take care to add tracking for each fragment if this is a
		 * jumbo packet chained using frag_list
2554 		 */
2555 		qdf_nbuf_t next;
2556 
2557 		next = qdf_nbuf_queue_next(ext_list);
2558 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2559 		ext_list = next;
2560 	}
2561 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2562 }
2563 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2564 
2565 /**
2566  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2567  * @net_buf: Network buf holding head segment (single)
2568  *
 * WLAN driver modules whose allocated SKBs are freed by the network stack
 * are supposed to call this API before returning the SKB to the network
 * stack, so that the SKB is not reported as a memory leak.
2572  *
2573  * Return: none
2574  */
2575 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2576 {
2577 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2578 
2579 	while (ext_list) {
2580 		/*
		 * Take care to release tracking for each fragment if this is
		 * a jumbo packet chained using frag_list
2583 		 */
2584 		qdf_nbuf_t next;
2585 
2586 		next = qdf_nbuf_queue_next(ext_list);
2587 
2588 		if (qdf_nbuf_get_users(ext_list) > 1) {
2589 			ext_list = next;
2590 			continue;
2591 		}
2592 
2593 		qdf_net_buf_debug_delete_node(ext_list);
2594 		ext_list = next;
2595 	}
2596 
2597 	if (qdf_nbuf_get_users(net_buf) > 1)
2598 		return;
2599 
2600 	qdf_net_buf_debug_delete_node(net_buf);
2601 }
2602 qdf_export_symbol(qdf_net_buf_debug_release_skb);
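
/*
 * Illustrative sketch (not part of the driver): a hypothetical rx path
 * that hands an skb up to the network stack releases it from tracking
 * first, since the stack, not the driver, will free it.
 *
 *	qdf_net_buf_debug_release_skb(nbuf);
 *	netif_rx(nbuf);
 */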
2603 
2604 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2605 				int reserve, int align, int prio,
2606 				const char *func, uint32_t line)
2607 {
2608 	qdf_nbuf_t nbuf;
2609 
2610 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2611 
2612 	/* Store SKB in internal QDF tracking table */
2613 	if (qdf_likely(nbuf)) {
2614 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2615 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2616 	} else {
2617 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2618 	}
2619 
2620 	return nbuf;
2621 }
2622 qdf_export_symbol(qdf_nbuf_alloc_debug);
2623 
2624 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2625 {
2626 	qdf_nbuf_t ext_list;
2627 
2628 	if (qdf_unlikely(!nbuf))
2629 		return;
2630 
2631 	if (qdf_nbuf_get_users(nbuf) > 1)
2632 		goto free_buf;
2633 
2634 	/* Remove SKB from internal QDF tracking table */
2635 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2636 	qdf_net_buf_debug_delete_node(nbuf);
2637 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2638 
2639 	/* Take care to delete the debug entries for frag_list */
2640 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2641 	while (ext_list) {
2642 		if (qdf_nbuf_get_users(ext_list) == 1) {
2643 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2644 			qdf_net_buf_debug_delete_node(ext_list);
2645 		}
2646 
2647 		ext_list = qdf_nbuf_queue_next(ext_list);
2648 	}
2649 
2650 free_buf:
2651 	__qdf_nbuf_free(nbuf);
2652 }
2653 qdf_export_symbol(qdf_nbuf_free_debug);
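
/*
 * Illustrative lifecycle under NBUF_MEMORY_DEBUG (a sketch, assuming the
 * public qdf_nbuf_alloc()/qdf_nbuf_free() wrappers route to these debug
 * variants): each allocation is recorded with its call site, and the
 * matching free removes the tracking node, so a missing free is reported
 * by qdf_net_buf_debug_exit() with the allocating function and line.
 *
 *	nbuf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
 *				    __func__, __LINE__);
 *	...
 *	qdf_nbuf_free_debug(nbuf, __func__, __LINE__);
 */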
2654 
2655 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2656 {
2657 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2658 
2659 	if (qdf_unlikely(!cloned_buf))
2660 		return NULL;
2661 
2662 	/* Store SKB in internal QDF tracking table */
2663 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2664 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2665 
2666 	return cloned_buf;
2667 }
2668 qdf_export_symbol(qdf_nbuf_clone_debug);
2669 
2670 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2671 {
2672 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2673 
2674 	if (qdf_unlikely(!copied_buf))
2675 		return NULL;
2676 
2677 	/* Store SKB in internal QDF tracking table */
2678 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2679 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2680 
2681 	return copied_buf;
2682 }
2683 qdf_export_symbol(qdf_nbuf_copy_debug);
2684 
2685 qdf_nbuf_t
2686 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
2687 			   const char *func, uint32_t line)
2688 {
2689 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
2690 
2691 	if (qdf_unlikely(!copied_buf))
2692 		return NULL;
2693 
2694 	/* Store SKB in internal QDF tracking table */
2695 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2696 	qdf_nbuf_history_add(copied_buf, func, line,
2697 			     QDF_NBUF_ALLOC_COPY_EXPAND);
2698 
2699 	return copied_buf;
2700 }
2701 
2702 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
2703 
2704 #endif /* NBUF_MEMORY_DEBUG */
2705 
2706 #if defined(FEATURE_TSO)
2707 
2708 /**
2709  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2710  *
2711  * @ethproto: ethernet type of the msdu
2712  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2713  * @l2_len: L2 length for the msdu
2714  * @eit_hdr: pointer to EIT header
2715  * @eit_hdr_len: EIT header length for the msdu
2716  * @eit_hdr_dma_map_addr: dma addr for EIT header
2717  * @tcphdr: pointer to tcp header
2718  * @ipv4_csum_en: ipv4 checksum enable
2719  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2720  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2721  * @ip_id: IP id
2722  * @tcp_seq_num: TCP sequence number
2723  *
2724  * This structure holds the TSO common info that is common
2725  * across all the TCP segments of the jumbo packet.
2726  */
2727 struct qdf_tso_cmn_seg_info_t {
2728 	uint16_t ethproto;
2729 	uint16_t ip_tcp_hdr_len;
2730 	uint16_t l2_len;
2731 	uint8_t *eit_hdr;
2732 	uint32_t eit_hdr_len;
2733 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2734 	struct tcphdr *tcphdr;
2735 	uint16_t ipv4_csum_en;
2736 	uint16_t tcp_ipv4_csum_en;
2737 	uint16_t tcp_ipv6_csum_en;
2738 	uint16_t ip_id;
2739 	uint32_t tcp_seq_num;
2740 };
2741 
2742 /**
2743  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2744  * information
2745  * @osdev: qdf device handle
2746  * @skb: skb buffer
 * @tso_info: Parameters common to all segments
2748  *
2749  * Get the TSO information that is common across all the TCP
2750  * segments of the jumbo packet
2751  *
 * Return: 0 - success, 1 - failure
2753  */
2754 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2755 			struct sk_buff *skb,
2756 			struct qdf_tso_cmn_seg_info_t *tso_info)
2757 {
2758 	/* Get ethernet type and ethernet header length */
2759 	tso_info->ethproto = vlan_get_protocol(skb);
2760 
2761 	/* Determine whether this is an IPv4 or IPv6 packet */
2762 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2763 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2764 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2765 
2766 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2767 		tso_info->ipv4_csum_en = 1;
2768 		tso_info->tcp_ipv4_csum_en = 1;
2769 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2770 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2771 				ipv4_hdr->protocol);
2772 			return 1;
2773 		}
2774 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2775 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2776 		tso_info->tcp_ipv6_csum_en = 1;
2777 	} else {
2778 		qdf_err("TSO: ethertype 0x%x is not supported!",
2779 			tso_info->ethproto);
2780 		return 1;
2781 	}
2782 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2783 	tso_info->tcphdr = tcp_hdr(skb);
2784 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2785 	/* get pointer to the ethernet + IP + TCP header and their length */
2786 	tso_info->eit_hdr = skb->data;
2787 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2788 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2789 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2790 							tso_info->eit_hdr,
2791 							tso_info->eit_hdr_len,
2792 							DMA_TO_DEVICE);
2793 	if (unlikely(dma_mapping_error(osdev->dev,
2794 				       tso_info->eit_hdr_dma_map_addr))) {
2795 		qdf_err("DMA mapping error!");
2796 		qdf_assert(0);
2797 		return 1;
2798 	}
2799 
2800 	if (tso_info->ethproto == htons(ETH_P_IP)) {
		/* include IPv4 header length for IPV4 (total length) */
2802 		tso_info->ip_tcp_hdr_len =
2803 			tso_info->eit_hdr_len - tso_info->l2_len;
2804 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2805 		/* exclude IPv6 header length for IPv6 (payload length) */
2806 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2807 	}
2808 	/*
2809 	 * The length of the payload (application layer data) is added to
2810 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2811 	 * descriptor.
2812 	 */
2813 
2814 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2815 		tso_info->tcp_seq_num,
2816 		tso_info->eit_hdr_len,
2817 		tso_info->l2_len,
2818 		skb->len);
2819 	return 0;
2820 }
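
/*
 * Worked example of the header arithmetic above (illustrative numbers):
 * for an untagged ethernet + IPv4 + TCP frame with no IP or TCP options,
 * l2_len = 14 and eit_hdr_len = 14 + 20 + 20 = 54, so ip_tcp_hdr_len =
 * 54 - 14 = 40; for the IPv6 equivalent, ip_tcp_hdr_len = tcp_hdrlen =
 * 20, since the IPv6 payload length field excludes the IP header.
 */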
2821 
2822 
2823 /**
2824  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2825  *
2826  * @curr_seg: Segment whose contents are initialized
2827  * @tso_cmn_info: Parameters common to all segements
2828  *
2829  * Return: None
2830  */
2831 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2832 				struct qdf_tso_seg_elem_t *curr_seg,
2833 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2834 {
2835 	/* Initialize the flags to 0 */
2836 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2837 
2838 	/*
2839 	 * The following fields remain the same across all segments of
2840 	 * a jumbo packet
2841 	 */
2842 	curr_seg->seg.tso_flags.tso_enable = 1;
2843 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2844 		tso_cmn_info->ipv4_csum_en;
2845 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2846 		tso_cmn_info->tcp_ipv6_csum_en;
2847 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2848 		tso_cmn_info->tcp_ipv4_csum_en;
2849 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2850 
2851 	/* The following fields change for the segments */
2852 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2853 	tso_cmn_info->ip_id++;
2854 
2855 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2856 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2857 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2858 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2859 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2860 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2861 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2862 
2863 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2864 
2865 	/*
2866 	 * First fragment for each segment always contains the ethernet,
2867 	 * IP and TCP header
2868 	 */
2869 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2870 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2871 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2872 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2873 
2874 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2875 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2876 		   tso_cmn_info->eit_hdr_len,
2877 		   curr_seg->seg.tso_flags.tcp_seq_num,
2878 		   curr_seg->seg.total_len);
2879 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2880 }
2881 
2882 /**
2883  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2884  * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 *           TSO segments will be populated within this.
2888  *
2889  * This function fragments a TCP jumbo packet into smaller
2890  * segments to be transmitted by the driver. It chains the TSO
2891  * segments created into a list.
2892  *
2893  * Return: number of TSO segments
2894  */
2895 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2896 		struct qdf_tso_info_t *tso_info)
2897 {
2898 	/* common across all segments */
2899 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2900 	/* segment specific */
2901 	void *tso_frag_vaddr;
2902 	qdf_dma_addr_t tso_frag_paddr = 0;
2903 	uint32_t num_seg = 0;
2904 	struct qdf_tso_seg_elem_t *curr_seg;
2905 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2906 	skb_frag_t *frag = NULL;
2907 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2909 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2910 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2911 	int j = 0; /* skb fragment index */
2912 
2913 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2914 	total_num_seg = tso_info->tso_num_seg_list;
2915 	curr_seg = tso_info->tso_seg_list;
2916 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2917 
2918 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2919 						skb, &tso_cmn_info))) {
2920 		qdf_warn("TSO: error getting common segment info");
2921 		return 0;
2922 	}
2923 
2924 	/* length of the first chunk of data in the skb */
2925 	skb_frag_len = skb_headlen(skb);
2926 
2927 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2928 	/* update the remaining skb fragment length and TSO segment length */
2929 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2930 	skb_proc -= tso_cmn_info.eit_hdr_len;
2931 
2932 	/* get the address to the next tso fragment */
2933 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2934 	/* get the length of the next tso fragment */
2935 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2936 
2937 	if (tso_frag_len != 0) {
2938 		tso_frag_paddr = dma_map_single(osdev->dev,
2939 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2940 	}
2941 
2942 	if (unlikely(dma_mapping_error(osdev->dev,
2943 					tso_frag_paddr))) {
2944 		qdf_err("DMA mapping error!");
2945 		qdf_assert(0);
2946 		return 0;
2947 	}
2948 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2949 		__LINE__, skb_frag_len, tso_frag_len);
2950 	num_seg = tso_info->num_segs;
2951 	tso_info->num_segs = 0;
2952 	tso_info->is_tso = 1;
2953 
2954 	while (num_seg && curr_seg) {
2955 		int i = 1; /* tso fragment index */
2956 		uint8_t more_tso_frags = 1;
2957 
2958 		curr_seg->seg.num_frags = 0;
2959 		tso_info->num_segs++;
2960 		total_num_seg->num_seg.tso_cmn_num_seg++;
2961 
2962 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2963 						 &tso_cmn_info);
2964 
2965 		if (unlikely(skb_proc == 0))
2966 			return tso_info->num_segs;
2967 
2968 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2969 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
		/* frag len is added to ip_len in while loop below */
2971 
2972 		curr_seg->seg.num_frags++;
2973 
2974 		while (more_tso_frags) {
2975 			if (tso_frag_len != 0) {
2976 				curr_seg->seg.tso_frags[i].vaddr =
2977 					tso_frag_vaddr;
2978 				curr_seg->seg.tso_frags[i].length =
2979 					tso_frag_len;
2980 				curr_seg->seg.total_len += tso_frag_len;
2981 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2982 				curr_seg->seg.num_frags++;
2983 				skb_proc = skb_proc - tso_frag_len;
2984 
2985 				/* increment the TCP sequence number */
2986 
2987 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2988 				curr_seg->seg.tso_frags[i].paddr =
2989 					tso_frag_paddr;
2990 			}
2991 
2992 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2993 					__func__, __LINE__,
2994 					i,
2995 					tso_frag_len,
2996 					curr_seg->seg.total_len,
2997 					curr_seg->seg.tso_frags[i].vaddr);
2998 
2999 			/* if there is no more data left in the skb */
3000 			if (!skb_proc)
3001 				return tso_info->num_segs;
3002 
3003 			/* get the next payload fragment information */
3004 			/* check if there are more fragments in this segment */
3005 			if (tso_frag_len < tso_seg_size) {
3006 				more_tso_frags = 1;
3007 				if (tso_frag_len != 0) {
3008 					tso_seg_size = tso_seg_size -
3009 						tso_frag_len;
3010 					i++;
3011 					if (curr_seg->seg.num_frags ==
3012 								FRAG_NUM_MAX) {
3013 						more_tso_frags = 0;
3014 						/*
3015 						 * reset i and the tso
3016 						 * payload size
3017 						 */
3018 						i = 1;
3019 						tso_seg_size =
3020 							skb_shinfo(skb)->
3021 								gso_size;
3022 					}
3023 				}
3024 			} else {
3025 				more_tso_frags = 0;
3026 				/* reset i and the tso payload size */
3027 				i = 1;
3028 				tso_seg_size = skb_shinfo(skb)->gso_size;
3029 			}
3030 
3031 			/* if the next fragment is contiguous */
3032 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3033 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3034 				skb_frag_len = skb_frag_len - tso_frag_len;
3035 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3036 
3037 			} else { /* the next fragment is not contiguous */
3038 				if (skb_shinfo(skb)->nr_frags == 0) {
3039 					qdf_info("TSO: nr_frags == 0!");
3040 					qdf_assert(0);
3041 					return 0;
3042 				}
3043 				if (j >= skb_shinfo(skb)->nr_frags) {
3044 					qdf_info("TSO: nr_frags %d j %d",
3045 						 skb_shinfo(skb)->nr_frags, j);
3046 					qdf_assert(0);
3047 					return 0;
3048 				}
3049 				frag = &skb_shinfo(skb)->frags[j];
3050 				skb_frag_len = skb_frag_size(frag);
3051 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3052 				tso_frag_vaddr = skb_frag_address_safe(frag);
3053 				j++;
3054 			}
3055 
3056 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3057 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3058 				tso_seg_size);
3059 
3060 			if (!(tso_frag_vaddr)) {
3061 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3062 						__func__);
3063 				return 0;
3064 			}
3065 
3066 			tso_frag_paddr =
3067 					 dma_map_single(osdev->dev,
3068 						 tso_frag_vaddr,
3069 						 tso_frag_len,
3070 						 DMA_TO_DEVICE);
3071 			if (unlikely(dma_mapping_error(osdev->dev,
3072 							tso_frag_paddr))) {
3073 				qdf_err("DMA mapping error!");
3074 				qdf_assert(0);
3075 				return 0;
3076 			}
3077 		}
3078 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3079 				curr_seg->seg.tso_flags.tcp_seq_num);
3080 		num_seg--;
3081 		/* if TCP FIN flag was set, set it in the last segment */
3082 		if (!num_seg)
3083 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3084 
3085 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3086 		curr_seg = curr_seg->next;
3087 	}
3088 	return tso_info->num_segs;
3089 }
3090 qdf_export_symbol(__qdf_nbuf_get_tso_info);
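
/*
 * Worked example (illustrative numbers): a TSO skb carrying 7300 bytes
 * of TCP payload with gso_size = 1460 is split by the loop above into
 * ceil(7300 / 1460) = 5 segments. Each segment's frag[0] repeats the
 * shared EIT header mapped once in tso_cmn_info, while the payload frags
 * walk through the linear data and page frags, advancing tcp_seq_num by
 * the bytes consumed so far.
 */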
3091 
3092 /**
3093  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3094  *
3095  * @osdev: qdf device handle
3096  * @tso_seg: TSO segment element to be unmapped
3097  * @is_last_seg: whether this is last tso seg or not
3098  *
3099  * Return: none
3100  */
3101 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3102 			  struct qdf_tso_seg_elem_t *tso_seg,
3103 			  bool is_last_seg)
3104 {
3105 	uint32_t num_frags = 0;
3106 
3107 	if (tso_seg->seg.num_frags > 0)
3108 		num_frags = tso_seg->seg.num_frags - 1;
3109 
	/* Num of frags in a tso seg cannot be less than 2 */
3111 	if (num_frags < 1) {
3112 		/*
3113 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3114 		 * this may happen when qdf_nbuf_get_tso_info failed,
3115 		 * do dma unmap for the 0th frag in this seg.
3116 		 */
3117 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3118 			goto last_seg_free_first_frag;
3119 
3120 		qdf_assert(0);
3121 		qdf_err("ERROR: num of frags in a tso segment is %d",
3122 			(num_frags + 1));
3123 		return;
3124 	}
3125 
3126 	while (num_frags) {
		/* Do dma unmap the tso seg except the 0th frag */
3128 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3129 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3130 				num_frags);
3131 			qdf_assert(0);
3132 			return;
3133 		}
3134 		dma_unmap_single(osdev->dev,
3135 				 tso_seg->seg.tso_frags[num_frags].paddr,
3136 				 tso_seg->seg.tso_frags[num_frags].length,
3137 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3138 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3139 		num_frags--;
3140 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3141 	}
3142 
3143 last_seg_free_first_frag:
3144 	if (is_last_seg) {
		/* Do dma unmap for the tso seg 0th frag */
3146 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3147 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3148 			qdf_assert(0);
3149 			return;
3150 		}
3151 		dma_unmap_single(osdev->dev,
3152 				 tso_seg->seg.tso_frags[0].paddr,
3153 				 tso_seg->seg.tso_frags[0].length,
3154 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3155 		tso_seg->seg.tso_frags[0].paddr = 0;
3156 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3157 	}
3158 }
3159 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3160 
3161 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3162 {
3163 	size_t packet_len;
3164 
3165 	packet_len = skb->len -
3166 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3167 		 tcp_hdrlen(skb));
3168 
3169 	return packet_len;
3170 }
3171 
3172 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
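
/*
 * Worked example (illustrative numbers): for an skb of total length 1514
 * with a 14-byte ethernet header, a 20-byte IPv4 header and a 20-byte
 * TCP header, the EIT header spans 54 bytes, so the routine above
 * returns 1514 - 54 = 1460 bytes of TCP payload.
 */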
3173 
3174 /**
 * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
 * for a network buffer
 * @skb: network buffer to be segmented
 *
 * This function computes how many TCP segments a TSO jumbo packet will
 * be split into, without actually building the segment list.
 *
 * Return: number of TSO segments, 0 on failure
3186  */
3187 #ifndef BUILD_X86
3188 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3189 {
3190 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3191 	uint32_t remainder, num_segs = 0;
3192 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3193 	uint8_t frags_per_tso = 0;
3194 	uint32_t skb_frag_len = 0;
3195 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3196 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3197 	skb_frag_t *frag = NULL;
3198 	int j = 0;
3199 	uint32_t temp_num_seg = 0;
3200 
3201 	/* length of the first chunk of data in the skb minus eit header*/
3202 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3203 
3204 	/* Calculate num of segs for skb's first chunk of data*/
3205 	remainder = skb_frag_len % tso_seg_size;
3206 	num_segs = skb_frag_len / tso_seg_size;
	/*
	 * Remainder non-zero and nr_frags zero implies end of skb data.
	 * In that case, one more tso seg is required to accommodate
	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then remaining data will be accommodated while doing the
	 * calculation for nr_frags data. Hence, frags_per_tso++.
3213 	 */
3214 	if (remainder) {
3215 		if (!skb_nr_frags)
3216 			num_segs++;
3217 		else
3218 			frags_per_tso++;
3219 	}
3220 
3221 	while (skb_nr_frags) {
3222 		if (j >= skb_shinfo(skb)->nr_frags) {
3223 			qdf_info("TSO: nr_frags %d j %d",
3224 				 skb_shinfo(skb)->nr_frags, j);
3225 			qdf_assert(0);
3226 			return 0;
3227 		}
		/*
		 * Calculate the number of tso segs for nr_frags data:
		 * get the length of each frag in skb_frag_len, add it to the
		 * remainder, get the number of segments by dividing it by
		 * tso_seg_size and calculate the new remainder.
3233 		 * Decrement the nr_frags value and keep
3234 		 * looping all the skb_fragments.
3235 		 */
3236 		frag = &skb_shinfo(skb)->frags[j];
3237 		skb_frag_len = skb_frag_size(frag);
3238 		temp_num_seg = num_segs;
3239 		remainder += skb_frag_len;
3240 		num_segs += remainder / tso_seg_size;
3241 		remainder = remainder % tso_seg_size;
3242 		skb_nr_frags--;
3243 		if (remainder) {
3244 			if (num_segs > temp_num_seg)
3245 				frags_per_tso = 0;
			/*
			 * Increment frags_per_tso whenever the remainder is
			 * positive. If frags_per_tso reaches (max - 1)
			 * [the first frag always carries the EIT header,
			 * therefore max - 1], increment num_segs as no more
			 * data can be accommodated in the current tso seg.
			 * Reset the remainder and frags_per_tso and keep
			 * looping.
3253 			 */
3254 			frags_per_tso++;
3255 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3256 				num_segs++;
3257 				frags_per_tso = 0;
3258 				remainder = 0;
3259 			}
			/*
			 * If this is the last skb frag and the remainder is
			 * still non-zero (frags_per_tso has not reached
			 * max - 1), then increment num_segs to take care of
			 * the remaining length.
3265 			 */
3266 			if (!skb_nr_frags && remainder) {
3267 				num_segs++;
3268 				frags_per_tso = 0;
3269 			}
3270 		} else {
			/* Whenever remainder is 0, reset the frags_per_tso. */
3272 			frags_per_tso = 0;
3273 		}
3274 		j++;
3275 	}
3276 
3277 	return num_segs;
3278 }
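
/*
 * Worked example of the counting above (illustrative numbers): a TSO skb
 * with 2000 payload bytes in the linear area, one 3000-byte page frag
 * and tso_seg_size = 1460 first yields num_segs = 2000 / 1460 = 1 with
 * remainder 540 carried into the frag pass; 540 + 3000 = 3540 then adds
 * 3540 / 1460 = 2 segments with remainder 620, and since that was the
 * last frag a final segment is added, giving 4 segments in total.
 */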
3279 #elif !defined(QCA_WIFI_QCN9000)
3280 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3281 {
3282 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3283 	skb_frag_t *frag = NULL;
3284 
3285 	/*
3286 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3287 	 * region which cannot be accessed by Target
3288 	 */
3289 	if (virt_to_phys(skb->data) < 0x50000040) {
3290 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3291 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3292 				virt_to_phys(skb->data));
3293 		goto fail;
3294 
3295 	}
3296 
3297 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3298 		frag = &skb_shinfo(skb)->frags[i];
3299 
3300 		if (!frag)
3301 			goto fail;
3302 
3303 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3304 			goto fail;
3305 	}
3306 
3307 
3308 	gso_size = skb_shinfo(skb)->gso_size;
3309 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3310 			+ tcp_hdrlen(skb));
3311 	while (tmp_len) {
3312 		num_segs++;
3313 		if (tmp_len > gso_size)
3314 			tmp_len -= gso_size;
3315 		else
3316 			break;
3317 	}
3318 
3319 	return num_segs;
3320 
3321 	/*
3322 	 * Do not free this frame, just do socket level accounting
3323 	 * so that this is not reused.
3324 	 */
3325 fail:
3326 	if (skb->sk)
3327 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3328 
3329 	return 0;
3330 }
3331 #else
3332 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3333 {
3334 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3335 	skb_frag_t *frag = NULL;
3336 
3337 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3338 		frag = &skb_shinfo(skb)->frags[i];
3339 
3340 		if (!frag)
3341 			goto fail;
3342 	}
3343 
3344 	gso_size = skb_shinfo(skb)->gso_size;
3345 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3346 			+ tcp_hdrlen(skb));
3347 	while (tmp_len) {
3348 		num_segs++;
3349 		if (tmp_len > gso_size)
3350 			tmp_len -= gso_size;
3351 		else
3352 			break;
3353 	}
3354 
3355 	return num_segs;
3356 
3357 	/*
3358 	 * Do not free this frame, just do socket level accounting
3359 	 * so that this is not reused.
3360 	 */
3361 fail:
3362 	if (skb->sk)
3363 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3364 
3365 	return 0;
3366 }
3367 #endif
3368 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3369 
3370 #endif /* FEATURE_TSO */
3371 
3372 /**
 * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address to be split
 * @lo: pointer to be filled with the low 32 bits
 * @hi: pointer to be filled with the high 32 bits
 *
 * Returns the high and low 32 bits of the DMA addr in the provided pointers.
3376  *
3377  * Return: N/A
3378  */
3379 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3380 			  uint32_t *lo, uint32_t *hi)
3381 {
3382 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3383 		*lo = lower_32_bits(dmaaddr);
3384 		*hi = upper_32_bits(dmaaddr);
3385 	} else {
3386 		*lo = dmaaddr;
3387 		*hi = 0;
3388 	}
3389 }
3390 
3391 qdf_export_symbol(__qdf_dmaaddr_to_32s);
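
/*
 * Worked example (hypothetical address): with a 64-bit qdf_dma_addr_t,
 * dmaaddr = 0x123456789 splits into *lo = 0x23456789 and *hi = 0x1; on a
 * build with a 32-bit DMA address the whole value lands in *lo and *hi
 * is forced to 0.
 */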
3392 
3393 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3394 {
3395 	qdf_nbuf_users_inc(&skb->users);
3396 	return skb;
3397 }
3398 qdf_export_symbol(__qdf_nbuf_inc_users);
3399 
3400 int __qdf_nbuf_get_users(struct sk_buff *skb)
3401 {
3402 	return qdf_nbuf_users_read(&skb->users);
3403 }
3404 qdf_export_symbol(__qdf_nbuf_get_users);
3405 
3406 /**
3407  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3408  * @skb: sk_buff handle
3409  *
3410  * Return: none
3411  */
3413 void __qdf_nbuf_ref(struct sk_buff *skb)
3414 {
3415 	skb_get(skb);
3416 }
3417 qdf_export_symbol(__qdf_nbuf_ref);
3418 
3419 /**
3420  * __qdf_nbuf_shared() - Check whether the buffer is shared
 * @skb: sk_buff buffer
 *
 * Return: true if more than one entity holds a reference to this buffer.
3424  */
3425 int __qdf_nbuf_shared(struct sk_buff *skb)
3426 {
3427 	return skb_shared(skb);
3428 }
3429 qdf_export_symbol(__qdf_nbuf_shared);
3430 
3431 /**
3432  * __qdf_nbuf_dmamap_create() - create a DMA map.
3433  * @osdev: qdf device handle
3434  * @dmap: dma map handle
3435  *
 * This can later be used to map networking buffers. They:
 * - need space in adf_drv's software descriptor
 * - are typically created during adf_drv_create
 * - need to be created before any API (qdf_nbuf_map) that uses them
 *
 * Return: QDF_STATUS
3442  */
3443 QDF_STATUS
3444 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3445 {
3446 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3447 	/*
	 * The driver can tell us its SG capability; it must be handled.
	 * Bounce buffers if they are there.
3450 	 */
3451 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3452 	if (!(*dmap))
3453 		error = QDF_STATUS_E_NOMEM;
3454 
3455 	return error;
3456 }
3457 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3458 /**
3459  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3460  * @osdev: qdf device handle
3461  * @dmap: dma map handle
3462  *
3463  * Return: none
3464  */
3465 void
3466 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3467 {
3468 	kfree(dmap);
3469 }
3470 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3471 
3472 /**
3473  * __qdf_nbuf_map_nbytes_single() - map nbytes
3474  * @osdev: os device
3475  * @buf: buffer
3476  * @dir: direction
3477  * @nbytes: number of bytes
3478  *
3479  * Return: QDF_STATUS
3480  */
3481 #ifdef A_SIMOS_DEVHOST
3482 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3483 		qdf_device_t osdev, struct sk_buff *buf,
3484 		 qdf_dma_dir_t dir, int nbytes)
3485 {
3486 	qdf_dma_addr_t paddr;
3487 
3488 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3489 	return QDF_STATUS_SUCCESS;
3490 }
3491 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3492 #else
3493 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3494 		qdf_device_t osdev, struct sk_buff *buf,
3495 		 qdf_dma_dir_t dir, int nbytes)
3496 {
3497 	qdf_dma_addr_t paddr;
3498 
3499 	/* assume that the OS only provides a single fragment */
3500 	QDF_NBUF_CB_PADDR(buf) = paddr =
3501 		dma_map_single(osdev->dev, buf->data,
3502 			nbytes, __qdf_dma_dir_to_os(dir));
3503 	return dma_mapping_error(osdev->dev, paddr) ?
3504 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3505 }
3506 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3507 #endif
3508 /**
3509  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3510  * @osdev: os device
3511  * @buf: buffer
3512  * @dir: direction
3513  * @nbytes: number of bytes
3514  *
3515  * Return: none
3516  */
3517 #if defined(A_SIMOS_DEVHOST)
3518 void
3519 __qdf_nbuf_unmap_nbytes_single(
3520 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3521 {
3522 }
3523 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3524 
3525 #else
3526 void
3527 __qdf_nbuf_unmap_nbytes_single(
3528 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3529 {
3530 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3531 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3532 		return;
3533 	}
3534 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3535 			nbytes, __qdf_dma_dir_to_os(dir));
3536 }
3537 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3538 #endif
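
/*
 * Illustrative pairing (a sketch, not part of the driver): a hypothetical
 * tx path maps the nbuf before handing it to hardware and later unmaps
 * it with the same byte count.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */
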
3539 /**
3540  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3541  * @osdev: os device
3542  * @skb: skb handle
3543  * @dir: dma direction
3544  * @nbytes: number of bytes to be mapped
3545  *
3546  * Return: QDF_STATUS
3547  */
3548 #ifdef QDF_OS_DEBUG
3549 QDF_STATUS
3550 __qdf_nbuf_map_nbytes(
3551 	qdf_device_t osdev,
3552 	struct sk_buff *skb,
3553 	qdf_dma_dir_t dir,
3554 	int nbytes)
3555 {
3556 	struct skb_shared_info  *sh = skb_shinfo(skb);
3557 
3558 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3559 
3560 	/*
3561 	 * Assume there's only a single fragment.
3562 	 * To support multiple fragments, it would be necessary to change
3563 	 * adf_nbuf_t to be a separate object that stores meta-info
3564 	 * (including the bus address for each fragment) and a pointer
3565 	 * to the underlying sk_buff.
3566 	 */
3567 	qdf_assert(sh->nr_frags == 0);
3568 
3569 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3570 }
3571 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3572 #else
3573 QDF_STATUS
3574 __qdf_nbuf_map_nbytes(
3575 	qdf_device_t osdev,
3576 	struct sk_buff *skb,
3577 	qdf_dma_dir_t dir,
3578 	int nbytes)
3579 {
3580 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3581 }
3582 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3583 #endif
3584 /**
3585  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3586  * @osdev: OS device
3587  * @skb: skb handle
3588  * @dir: direction
3589  * @nbytes: number of bytes
3590  *
3591  * Return: none
3592  */
3593 void
3594 __qdf_nbuf_unmap_nbytes(
3595 	qdf_device_t osdev,
3596 	struct sk_buff *skb,
3597 	qdf_dma_dir_t dir,
3598 	int nbytes)
3599 {
3600 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3601 
3602 	/*
3603 	 * Assume there's a single fragment.
3604 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3605 	 */
3606 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3607 }
3608 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3609 
3610 /**
3611  * __qdf_nbuf_dma_map_info() - return the dma map info
3612  * @bmap: dma map
3613  * @sg: dma map info
3614  *
3615  * Return: none
3616  */
3617 void
3618 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3619 {
3620 	qdf_assert(bmap->mapped);
3621 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3622 
3623 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3624 			sizeof(struct __qdf_segment));
3625 	sg->nsegs = bmap->nsegs;
3626 }
3627 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3628 /**
3629  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3630  *			specified by the index
3631  * @skb: sk buff
3632  * @sg: scatter/gather list of all the frags
3633  *
3634  * Return: none
3635  */
3636 #if defined(__QDF_SUPPORT_FRAG_MEM)
3637 void
3638 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	qdf_assert(skb);
	sg->sg_segs[0].vaddr = skb->data;
	sg->sg_segs[0].len   = skb->len;
	sg->nsegs            = 1;

	for (i = 1; i <= sh->nr_frags; i++) {
		skb_frag_t    *f        = &sh->frags[i - 1];

		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
			f->page_offset);
		sg->sg_segs[i].len      = f->size;

		qdf_assert(i < QDF_MAX_SGLIST);
	}
	/* the head data counts as one segment, plus one per page frag */
	sg->nsegs += sh->nr_frags;
}
3657 qdf_export_symbol(__qdf_nbuf_frag_info);
3658 #else
3659 #ifdef QDF_OS_DEBUG
3660 void
3661 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3662 {
3663 
3664 	struct skb_shared_info  *sh = skb_shinfo(skb);
3665 
3666 	qdf_assert(skb);
3667 	sg->sg_segs[0].vaddr = skb->data;
3668 	sg->sg_segs[0].len   = skb->len;
3669 	sg->nsegs            = 1;
3670 
3671 	qdf_assert(sh->nr_frags == 0);
3672 }
3673 qdf_export_symbol(__qdf_nbuf_frag_info);
3674 #else
3675 void
3676 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3677 {
3678 	sg->sg_segs[0].vaddr = skb->data;
3679 	sg->sg_segs[0].len   = skb->len;
3680 	sg->nsegs            = 1;
3681 }
3682 qdf_export_symbol(__qdf_nbuf_frag_info);
3683 #endif
3684 #endif
3685 /**
3686  * __qdf_nbuf_get_frag_size() - get frag size
3687  * @nbuf: sk buffer
3688  * @cur_frag: current frag
3689  *
3690  * Return: frag size
3691  */
3692 uint32_t
3693 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3694 {
3695 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3696 	const skb_frag_t *frag = sh->frags + cur_frag;
3697 
3698 	return skb_frag_size(frag);
3699 }
3700 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3701 
3702 /**
3703  * __qdf_nbuf_frag_map() - dma map frag
3704  * @osdev: os device
3705  * @nbuf: sk buff
3706  * @offset: offset
3707  * @dir: direction
3708  * @cur_frag: current fragment
3709  *
3710  * Return: QDF status
3711  */
3712 #ifdef A_SIMOS_DEVHOST
3713 QDF_STATUS __qdf_nbuf_frag_map(
3714 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3715 	int offset, qdf_dma_dir_t dir, int cur_frag)
3716 {
	qdf_dma_addr_t paddr;

	/* on the simulation host, the kernel virtual address stands in for
	 * the DMA address; cast to avoid a pointer/integer mismatch
	 */
	QDF_NBUF_CB_PADDR(nbuf) = paddr = (qdf_dma_addr_t)nbuf->data;
3720 	return QDF_STATUS_SUCCESS;
3721 }
3722 qdf_export_symbol(__qdf_nbuf_frag_map);
3723 #else
3724 QDF_STATUS __qdf_nbuf_frag_map(
3725 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3726 	int offset, qdf_dma_dir_t dir, int cur_frag)
3727 {
3728 	dma_addr_t paddr, frag_len;
3729 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3730 	const skb_frag_t *frag = sh->frags + cur_frag;
3731 
3732 	frag_len = skb_frag_size(frag);
3733 
3734 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3735 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3736 					__qdf_dma_dir_to_os(dir));
3737 	return dma_mapping_error(osdev->dev, paddr) ?
3738 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3739 }
3740 qdf_export_symbol(__qdf_nbuf_frag_map);
3741 #endif
3742 /**
3743  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3744  * @dmap: dma map
3745  * @cb: callback
3746  * @arg: argument
3747  *
3748  * Return: none
3749  */
3750 void
3751 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3752 {
3753 	return;
3754 }
3755 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3756 
3757 
3758 /**
3759  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3760  * @osdev: os device
3761  * @buf: sk buff
3762  * @dir: direction
3763  *
3764  * Return: none
3765  */
3766 #if defined(A_SIMOS_DEVHOST)
3767 static void __qdf_nbuf_sync_single_for_cpu(
3768 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3769 {
3770 	return;
3771 }
3772 #else
3773 static void __qdf_nbuf_sync_single_for_cpu(
3774 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3775 {
3776 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3777 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3778 		return;
3779 	}
3780 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3781 		skb_end_offset(buf) - skb_headroom(buf),
3782 		__qdf_dma_dir_to_os(dir));
3783 }
3784 #endif
3785 /**
3786  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3787  * @osdev: os device
3788  * @skb: sk buff
3789  * @dir: direction
3790  *
3791  * Return: none
3792  */
3793 void
3794 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3795 	struct sk_buff *skb, qdf_dma_dir_t dir)
3796 {
3797 	qdf_assert((dir == QDF_DMA_TO_DEVICE) ||
3798 		   (dir == QDF_DMA_FROM_DEVICE));
3799 
3800 	/*
3801 	 * Assume there's a single fragment.
3802 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3803 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
3804 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3805 }
3806 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
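
/*
 * Example (sketch): make device-written rx data visible to the CPU
 * before parsing it; parse_rx_frame() is a hypothetical consumer of
 * the synced buffer contents.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	parse_rx_frame(qdf_nbuf_data(skb));
 */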
3807 
3808 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3809 /**
3810  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3811  * @rx_status: Pointer to rx_status.
3812  * @rtap_buf: Buf to which VHT info has to be updated.
3813  * @rtap_len: Current length of radiotap buffer
3814  *
3815  * Return: Length of radiotap after VHT flags updated.
3816  */
3817 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3818 					struct mon_rx_status *rx_status,
3819 					uint8_t *rtap_buf,
3820 					uint32_t rtap_len)
3821 {
3822 	uint16_t vht_flags = 0;
3823 
3824 	rtap_len = qdf_align(rtap_len, 2);
3825 
3826 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3827 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3828 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3829 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3830 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3831 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3832 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3833 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3834 	rtap_len += 2;
3835 
3836 	rtap_buf[rtap_len] |=
3837 		(rx_status->is_stbc ?
3838 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3839 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3840 		(rx_status->ldpc ?
3841 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3842 		(rx_status->beamformed ?
3843 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3844 	rtap_len += 1;
3845 	switch (rx_status->vht_flag_values2) {
3846 	case IEEE80211_RADIOTAP_VHT_BW_20:
3847 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3848 		break;
3849 	case IEEE80211_RADIOTAP_VHT_BW_40:
3850 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3851 		break;
3852 	case IEEE80211_RADIOTAP_VHT_BW_80:
3853 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3854 		break;
3855 	case IEEE80211_RADIOTAP_VHT_BW_160:
3856 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3857 		break;
3858 	}
3859 	rtap_len += 1;
3860 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3861 	rtap_len += 1;
3862 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3863 	rtap_len += 1;
3864 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3865 	rtap_len += 1;
3866 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3867 	rtap_len += 1;
3868 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3869 	rtap_len += 1;
3870 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3871 	rtap_len += 1;
3872 	put_unaligned_le16(rx_status->vht_flag_values6,
3873 			   &rtap_buf[rtap_len]);
3874 	rtap_len += 2;
3875 
3876 	return rtap_len;
3877 }
3878 
3879 /**
3880  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3881  * @rx_status: Pointer to rx_status.
3882  * @rtap_buf: buffer to which radiotap has to be updated
3883  * @rtap_len: radiotap length
3884  *
3885  * This API updates the high-efficiency (HE, 11ax) radiotap fields.
3886  *
3887  * Return: updated length of the radiotap buffer.
3888  */
3889 static unsigned int
3890 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3891 				     uint8_t *rtap_buf, uint32_t rtap_len)
3892 {
3893 	/*
3894 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3895 	 * Enable all "known" HE radiotap flags for now
3896 	 */
3897 	rtap_len = qdf_align(rtap_len, 2);
3898 
3899 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3900 	rtap_len += 2;
3901 
3902 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3903 	rtap_len += 2;
3904 
3905 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3906 	rtap_len += 2;
3907 
3908 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3909 	rtap_len += 2;
3910 
3911 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3912 	rtap_len += 2;
3913 
3914 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3915 	rtap_len += 2;
3916 	qdf_debug("he data %x %x %x %x %x %x",
3917 		  rx_status->he_data1,
3918 		  rx_status->he_data2, rx_status->he_data3,
3919 		  rx_status->he_data4, rx_status->he_data5,
3920 		  rx_status->he_data6);
3921 	return rtap_len;
3922 }
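
/*
 * Example (sketch): callers are expected to pre-fill the "known" bits
 * in he_data1/he_data2 before invoking the writer above, e.g.
 *
 *	rx_status->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN;
 *
 * The bit name follows the upstream ieee80211_radiotap.h; its
 * availability depends on the kernel version in use.
 */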
3923 
3924 
3925 /**
3926  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3927  * @rx_status: Pointer to rx_status.
3928  * @rtap_buf: buffer to which radiotap has to be updated
3929  * @rtap_len: radiotap length
3930  *
3931  * This API updates the HE-MU radiotap fields.
3932  *
3933  * Return: updated length of the radiotap buffer.
3934  */
3935 static unsigned int
3936 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3937 				     uint8_t *rtap_buf, uint32_t rtap_len)
3938 {
3939 	rtap_len = qdf_align(rtap_len, 2);
3940 
3941 	/*
3942 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3943 	 * Enable all "known" he-mu radiotap flags for now
3944 	 */
3945 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3946 	rtap_len += 2;
3947 
3948 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3949 	rtap_len += 2;
3950 
3951 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3952 	rtap_len += 1;
3953 
3954 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3955 	rtap_len += 1;
3956 
3957 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3958 	rtap_len += 1;
3959 
3960 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3961 	rtap_len += 1;
3962 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3963 		  rx_status->he_flags1,
3964 		  rx_status->he_flags2, rx_status->he_RU[0],
3965 		  rx_status->he_RU[1], rx_status->he_RU[2],
3966 		  rx_status->he_RU[3]);
3967 
3968 	return rtap_len;
3969 }
3970 
3971 /**
3972  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3973  * @rx_status: Pointer to rx_status.
3974  * @rtap_buf: buffer to which radiotap has to be updated
3975  * @rtap_len: radiotap length
3976  *
3977  * This API updates the HE-MU-other radiotap fields.
3978  *
3979  * Return: updated length of the radiotap buffer.
3980  */
3981 static unsigned int
3982 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3983 				     uint8_t *rtap_buf, uint32_t rtap_len)
3984 {
3985 	rtap_len = qdf_align(rtap_len, 2);
3986 
3987 	/*
3988 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3989 	 * Enable all "known" he-mu-other radiotap flags for now
3990 	 */
3991 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3992 	rtap_len += 2;
3993 
3994 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3995 	rtap_len += 2;
3996 
3997 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3998 	rtap_len += 1;
3999 
4000 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4001 	rtap_len += 1;
4002 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4003 		  rx_status->he_per_user_1,
4004 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4005 		  rx_status->he_per_user_known);
4006 	return rtap_len;
4007 }
4008 
4009 
4010 /*
4011  * This is the combined length reserved for the radiotap header:
4012  * the mandatory struct ieee80211_radiotap_header plus
4013  * RADIOTAP_HEADER_LEN must not exceed the available headroom_sz.
4014  * Increase this when more radiotap elements are added; the number
4015  * after '+' indicates the maximum possible alignment padding.
4016  */
4017 
4018 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4019 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4020 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4021 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4022 #define RADIOTAP_FIXED_HEADER_LEN 17
4023 #define RADIOTAP_HT_FLAGS_LEN 3
4024 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4025 #define RADIOTAP_VENDOR_NS_LEN \
4026 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4027 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4028 				RADIOTAP_FIXED_HEADER_LEN + \
4029 				RADIOTAP_HT_FLAGS_LEN + \
4030 				RADIOTAP_VHT_FLAGS_LEN + \
4031 				RADIOTAP_AMPDU_STATUS_LEN + \
4032 				RADIOTAP_HE_FLAGS_LEN + \
4033 				RADIOTAP_HE_MU_FLAGS_LEN + \
4034 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4035 				RADIOTAP_VENDOR_NS_LEN)
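
/*
 * Worked out with the element sizes above, the reserved length is
 * 8 (struct ieee80211_radiotap_header) + 17 + 3 + 13 + 11 + 13 + 9 +
 * 19 = 93 bytes plus the vendor namespace element, all of which must
 * fit within the headroom passed to qdf_nbuf_update_radiotap().
 */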
4036 
4037 #define IEEE80211_RADIOTAP_HE 23
4038 #define IEEE80211_RADIOTAP_HE_MU	24
4039 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4040 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4041 
4042 /**
4043  * radiotap_num_to_freq() - Get frequency from chan number
4044  * @chan_num: Input channel number
4045  *
4046  * Return: Channel frequency in MHz
4047  */
4048 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
4049 {
4050 	if (chan_num == CHANNEL_NUM_14)
4051 		return CHANNEL_FREQ_2484;
4052 	if (chan_num < CHANNEL_NUM_14)
4053 		return CHANNEL_FREQ_2407 +
4054 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4055 
4056 	if (chan_num < CHANNEL_NUM_27)
4057 		return CHANNEL_FREQ_2512 +
4058 			((chan_num - CHANNEL_NUM_15) *
4059 			 FREQ_MULTIPLIER_CONST_20MHZ);
4060 
4061 	if (chan_num > CHANNEL_NUM_182 &&
4062 			chan_num < CHANNEL_NUM_197)
4063 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4064 			CHANNEL_FREQ_4000);
4065 
4066 	return CHANNEL_FREQ_5000 +
4067 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4068 }
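
/*
 * Example: radiotap_num_to_freq(1) = 2407 + 1 * 5 = 2412 MHz,
 * radiotap_num_to_freq(14) = 2484 MHz (special-cased above) and
 * radiotap_num_to_freq(36) = 5000 + 36 * 5 = 5180 MHz.
 */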
4069 
4070 /**
4071  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4072  * @rx_status: Pointer to rx_status.
4073  * @rtap_buf: Buf to which AMPDU info has to be updated.
4074  * @rtap_len: Current length of radiotap buffer
4075  *
4076  * Return: Length of radiotap after AMPDU flags updated.
4077  */
4078 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4079 					struct mon_rx_status *rx_status,
4080 					uint8_t *rtap_buf,
4081 					uint32_t rtap_len)
4082 {
4083 	/*
4084 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4085 	 * The first 32 bits of the A-MPDU element carry the reference number
4086 	 */
4087 
4088 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4089 	uint16_t ampdu_flags = 0;
4090 	uint16_t ampdu_reserved_flags = 0;
4091 
4092 	rtap_len = qdf_align(rtap_len, 4);
4093 
4094 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4095 	rtap_len += 4;
4096 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4097 	rtap_len += 2;
4098 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4099 	rtap_len += 2;
4100 
4101 	return rtap_len;
4102 }
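
/*
 * Example: with rx_status->ppdu_id == 0x1234 the element above encodes
 * the bytes 34 12 00 00 (little-endian reference number) followed by
 * four zero bytes, since no A-MPDU flags are reported yet.
 */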
4103 
4104 /**
4105  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4106  * @rx_status: Pointer to rx_status.
4107  * @nbuf:      nbuf pointer to which radiotap has to be updated
4108  * @headroom_sz: Available headroom size.
4109  *
4110  * Return: length of rtap_len updated.
4111  */
4112 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4113 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4114 {
4115 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4116 	struct ieee80211_radiotap_header *rthdr =
4117 		(struct ieee80211_radiotap_header *)rtap_buf;
4118 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4119 	uint32_t rtap_len = rtap_hdr_len;
4120 	uint8_t length = rtap_len;
4121 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4122 
4123 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4124 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4125 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4126 	rtap_len += 8;
4127 
4128 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4129 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4130 
4131 	if (rx_status->rs_fcs_err)
4132 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4133 
4134 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4135 	rtap_len += 1;
4136 
4137 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4138 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4139 	    !rx_status->he_flags) {
4140 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4141 		rtap_buf[rtap_len] = rx_status->rate;
4142 	} else
4143 		rtap_buf[rtap_len] = 0;
4144 	rtap_len += 1;
4145 
4146 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4147 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4148 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4149 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4150 	rtap_len += 2;
4151 	/* Channel flags. */
4152 	if (rx_status->chan_num > CHANNEL_NUM_35)
4153 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4154 	else
4155 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4156 	if (rx_status->cck_flag)
4157 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4158 	if (rx_status->ofdm_flag)
4159 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4160 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4161 	rtap_len += 2;
4162 
4163 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4164 	 *					(dBm)
4165 	 */
4166 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4167 	/*
4168 	 * rssi_comb is in dB relative to the noise floor; add the
4169 	 * channel noise floor (typically -96 dBm) to convert it to dBm.
4170 	 */
4171 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4172 	rtap_len += 1;
4173 
4174 	/* RX signal noise floor */
4175 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4176 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4177 	rtap_len += 1;
4178 
4179 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4180 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4181 	rtap_buf[rtap_len] = rx_status->nr_ant;
4182 	rtap_len += 1;
4183 
4184 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4185 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4186 		return 0;
4187 	}
4188 
4189 	if (rx_status->ht_flags) {
4190 		length = rtap_len;
4191 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4192 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4193 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4194 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4195 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4196 		rtap_len += 1;
4197 
4198 		if (rx_status->sgi)
4199 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4200 		if (rx_status->bw)
4201 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4202 		else
4203 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4204 		rtap_len += 1;
4205 
4206 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4207 		rtap_len += 1;
4208 
4209 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4210 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4211 			return 0;
4212 		}
4213 	}
4214 
4215 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4216 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4217 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4218 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4219 								rtap_buf,
4220 								rtap_len);
4221 	}
4222 
4223 	if (rx_status->vht_flags) {
4224 		length = rtap_len;
4225 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4226 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4227 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4228 								rtap_buf,
4229 								rtap_len);
4230 
4231 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4232 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4233 			return 0;
4234 		}
4235 	}
4236 
4237 	if (rx_status->he_flags) {
4238 		length = rtap_len;
4239 		/* IEEE80211_RADIOTAP_HE */
4240 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4241 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4242 								rtap_buf,
4243 								rtap_len);
4244 
4245 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4246 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4247 			return 0;
4248 		}
4249 	}
4250 
4251 	if (rx_status->he_mu_flags) {
4252 		length = rtap_len;
4253 		/* IEEE80211_RADIOTAP_HE-MU */
4254 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4255 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4256 								rtap_buf,
4257 								rtap_len);
4258 
4259 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4260 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4261 			return 0;
4262 		}
4263 	}
4264 
4265 	if (rx_status->he_mu_other_flags) {
4266 		length = rtap_len;
4267 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4268 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4269 		rtap_len =
4270 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4271 								rtap_buf,
4272 								rtap_len);
4273 
4274 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4275 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4276 			return 0;
4277 		}
4278 	}
4279 
4280 	rtap_len = qdf_align(rtap_len, 2);
4281 	/*
4282 	 * Radiotap Vendor Namespace
4283 	 */
4284 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4285 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4286 					(rtap_buf + rtap_len);
4287 	/*
4288 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4289 	 */
4290 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4291 	/*
4292 	 * Name space selector = 0
4293 	 * We only will have one namespace for now
4294 	 */
4295 	radiotap_vendor_ns_ath->hdr.selector = 0;
4296 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4297 					sizeof(*radiotap_vendor_ns_ath) -
4298 					sizeof(radiotap_vendor_ns_ath->hdr));
4299 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4300 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4301 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4302 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4303 				cpu_to_le32(rx_status->ppdu_timestamp);
4304 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4305 
4306 	rthdr->it_len = cpu_to_le16(rtap_len);
4307 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4308 
4309 	if (headroom_sz < rtap_len) {
4310 		qdf_err("ERROR: not enough space to update radiotap");
4311 		return 0;
4312 	}
4313 	qdf_nbuf_push_head(nbuf, rtap_len);
4314 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4315 	return rtap_len;
4316 }
4317 #else
4318 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4319 					struct mon_rx_status *rx_status,
4320 					uint8_t *rtap_buf,
4321 					uint32_t rtap_len)
4322 {
4323 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4324 	return 0;
4325 }
4326 
4327 static unsigned int qdf_nbuf_update_radiotap_he_flags(
4328 	struct mon_rx_status *rx_status, uint8_t *rtap_buf, uint32_t rtap_len)
4329 {
4330 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4331 	return 0;
4332 }
4333 
4334 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4335 					struct mon_rx_status *rx_status,
4336 					uint8_t *rtap_buf,
4337 					uint32_t rtap_len)
4338 {
4339 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4340 	return 0;
4341 }
4342 
4343 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4344 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4345 {
4346 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4347 	return 0;
4348 }
4349 #endif
4350 qdf_export_symbol(qdf_nbuf_update_radiotap);
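
/*
 * Example (sketch): attach a radiotap header to a monitor-mode frame
 * before delivering it to the OS; mon_skb and rx_status are assumed
 * to be filled in by the monitor rx path.
 *
 *	if (!qdf_nbuf_update_radiotap(&rx_status, mon_skb,
 *				      qdf_nbuf_headroom(mon_skb)))
 *		qdf_nbuf_free(mon_skb);
 */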
4351 
4352 /**
4353  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4354  * @cb_func_ptr: function pointer to the nbuf free callback
4355  *
4356  * This function registers a callback function for nbuf free.
4357  *
4358  * Return: none
4359  */
4360 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4361 {
4362 	nbuf_free_cb = cb_func_ptr;
4363 }
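
/*
 * Example (sketch): register a driver-private free routine;
 * my_nbuf_free_hook is a hypothetical callback matching the
 * qdf_nbuf_free_t signature.
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_hook);
 */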
4364 
4365 /**
4366  * qdf_nbuf_classify_pkt() - classify packet
4367  * @skb - sk buff
4368  *
4369  * Return: none
4370  */
4371 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4372 {
4373 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4374 
4375 	/* check destination mac address is broadcast/multicast */
4376 	if (is_broadcast_ether_addr((uint8_t *)eh))
4377 	if (is_broadcast_ether_addr(eh->h_dest))
4378 		QDF_NBUF_CB_SET_BCAST(skb);
4379 	else if (is_multicast_ether_addr(eh->h_dest))
4380 
4381 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4382 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4383 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4384 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4385 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4386 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4387 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4388 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4389 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4390 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4391 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4392 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4393 }
4394 qdf_export_symbol(qdf_nbuf_classify_pkt);
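
/*
 * Example (sketch): classify a frame before queueing so the data path
 * can prioritize control traffic; tx_enqueue_high_prio() is a
 * hypothetical helper.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		tx_enqueue_high_prio(skb);
 */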
4395 
4396 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4397 {
4398 	qdf_nbuf_users_set(&nbuf->users, 1);
4399 	nbuf->data = nbuf->head + NET_SKB_PAD;
4400 	skb_reset_tail_pointer(nbuf);
4401 }
4402 qdf_export_symbol(__qdf_nbuf_init);
4403 
4404 #ifdef WLAN_FEATURE_FASTPATH
4405 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4406 {
4407 	qdf_nbuf_users_set(&nbuf->users, 1);
4408 	nbuf->data = nbuf->head + NET_SKB_PAD;
4409 	skb_reset_tail_pointer(nbuf);
4410 }
4411 qdf_export_symbol(qdf_nbuf_init_fast);
4412 #endif /* WLAN_FEATURE_FASTPATH */
4413 
4414 
4415 #ifdef QDF_NBUF_GLOBAL_COUNT
4416 /**
4417  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4418  *
4419  * Return: void
4420  */
4421 void __qdf_nbuf_mod_init(void)
4422 {
4423 	qdf_atomic_init(&nbuf_count);
4424 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4425 }
4426 
4427 /**
4428  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4429  *
4430  * Return: void
4431  */
4432 void __qdf_nbuf_mod_exit(void)
4433 {
4434 }
4435 #endif
4436