xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 1397a33f48ea6455be40871470b286e535820eb8)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework (QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_types.h>
32 #include <qdf_nbuf.h>
33 #include "qdf_flex_mem.h"
34 #include <qdf_mem.h>
35 #include <qdf_status.h>
36 #include <qdf_lock.h>
37 #include <qdf_trace.h>
38 #include <qdf_debugfs.h>
39 #include <net/ieee80211_radiotap.h>
40 #include <qdf_module.h>
42 #include <pld_common.h>
44 #include "qdf_str.h"
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
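
/*
 * Illustrative sketch (not in the original file): these wrappers exist
 * because sk_buff->users changed from atomic_t to refcount_t in kernel
 * 4.13, so callers can manipulate the reference count the same way on
 * either kernel:
 *
 *	qdf_nbuf_users_inc(&skb->users);
 *	...use the shared skb...
 *	qdf_nbuf_users_dec(&skb->users);
 */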
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* channel number to freq conversion */
78 #define CHANNEL_NUM_14 14
79 #define CHANNEL_NUM_15 15
80 #define CHANNEL_NUM_27 27
81 #define CHANNEL_NUM_35 35
82 #define CHANNEL_NUM_182 182
83 #define CHANNEL_NUM_197 197
84 #define CHANNEL_FREQ_2484 2484
85 #define CHANNEL_FREQ_2407 2407
86 #define CHANNEL_FREQ_2512 2512
87 #define CHANNEL_FREQ_5000 5000
88 #define CHANNEL_FREQ_4000 4000
89 #define FREQ_MULTIPLIER_CONST_5MHZ 5
90 #define FREQ_MULTIPLIER_CONST_20MHZ 20
91 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
92 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
93 #define RADIOTAP_CCK_CHANNEL 0x0020
94 #define RADIOTAP_OFDM_CHANNEL 0x0040
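
/*
 * Worked example of the channel-to-frequency mapping these constants
 * support (a sketch of the usual radiotap conversion; the helper later
 * in this file may special-case more ranges):
 *
 *	ch 6  (2.4 GHz): CHANNEL_FREQ_2407 + 6 * FREQ_MULTIPLIER_CONST_5MHZ
 *	                 = 2407 + 30 = 2437 MHz
 *	ch 14 (2.4 GHz): special-cased to CHANNEL_FREQ_2484
 *	ch 36 (5 GHz):   CHANNEL_FREQ_5000 + 36 * FREQ_MULTIPLIER_CONST_5MHZ
 *	                 = 5000 + 180 = 5180 MHz
 */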
95 
96 #ifdef CONFIG_MCL
97 #include <qdf_mc_timer.h>
98 
99 struct qdf_track_timer {
100 	qdf_mc_timer_t track_timer;
101 	qdf_atomic_t alloc_fail_cnt;
102 };
103 
104 static struct qdf_track_timer alloc_track_timer;
105 
106 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
107 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
108 #endif
109 
110 /* Packet Counter */
111 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
112 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
113 #ifdef QDF_NBUF_GLOBAL_COUNT
114 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
115 static qdf_atomic_t nbuf_count;
116 #endif
117 
118 /**
119  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
120  *
121  * Return: none
122  */
123 void qdf_nbuf_tx_desc_count_display(void)
124 {
125 	qdf_debug("Current Snapshot of the Driver:");
126 	qdf_debug("Data Packets:");
127 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
128 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
129 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
138 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
146 	qdf_debug("Mgmt Packets:");
147 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
148 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
149 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
161 }
162 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
163 
164 /**
165  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
166  * @packet_type: packet type, either mgmt or data
167  * @current_state: layer at which the packet is currently present
168  *
169  * Return: none
170  */
171 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
172 			uint8_t current_state)
173 {
174 	switch (packet_type) {
175 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
176 		nbuf_tx_mgmt[current_state]++;
177 		break;
178 	case QDF_NBUF_TX_PKT_DATA_TRACK:
179 		nbuf_tx_data[current_state]++;
180 		break;
181 	default:
182 		break;
183 	}
184 }
185 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
186 
187 /**
188  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
189  *
190  * Return: none
191  */
192 void qdf_nbuf_tx_desc_count_clear(void)
193 {
194 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
195 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
196 }
197 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
198 
199 /**
200  * qdf_nbuf_set_state() - Updates the packet state
201  * @nbuf:            network buffer
202  * @current_state: layer at which the packet currently is
203  *
204  * This function updates the packet state to the layer at which the packet
205  * currently is
206  *
207  * Return: none
208  */
209 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
210 {
211 	/*
212 	 * Only mgmt and data packets are tracked; WMI messages
213 	 * such as scan commands are not tracked
214 	 */
215 	uint8_t packet_type;
216 
217 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
218 
219 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
220 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
221 		return;
222 	}
223 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
224 	qdf_nbuf_tx_desc_count_update(packet_type,
225 					current_state);
226 }
227 qdf_export_symbol(qdf_nbuf_set_state);
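
/*
 * Illustrative call sequence (a sketch; the real call sites live in the
 * layers named by the counters): a data frame is tagged for tracking
 * once, then each layer records the hand-off. The deltas printed by
 * qdf_nbuf_tx_desc_count_display() are computed from these updates:
 *
 *	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
 *	...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
 *	...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_FREE);
 */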
228 
229 #ifdef CONFIG_MCL
230 /**
231  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
232  *
233  * This function starts the alloc fail replenish timer.
234  *
235  * Return: void
236  */
237 static void __qdf_nbuf_start_replenish_timer(void)
238 {
239 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
240 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
241 	    QDF_TIMER_STATE_RUNNING)
242 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
243 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
244 }
245 
246 /**
247  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
248  *
249  * This function stops the alloc fail replenish timer.
250  *
251  * Return: void
252  */
253 static void __qdf_nbuf_stop_replenish_timer(void)
254 {
255 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
256 		return;
257 
258 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
259 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
260 	    QDF_TIMER_STATE_RUNNING)
261 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
262 }
263 
264 /**
265  * qdf_replenish_expire_handler() - Replenish expire handler
266  * @arg: opaque callback argument (unused)
267  * This function triggers when the alloc fail replenish timer expires.
268  *
269  * Return: void
270  */
271 static void qdf_replenish_expire_handler(void *arg)
272 {
273 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
274 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
275 		qdf_print("ERROR: NBUF allocation timer expired, fail count %d",
276 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
277 
278 		/* Error handling here */
279 	}
280 }
281 
282 /**
283  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
284  *
285  * This function initializes the nbuf alloc fail replenish timer.
286  *
287  * Return: void
288  */
289 void __qdf_nbuf_init_replenish_timer(void)
290 {
291 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
292 			  qdf_replenish_expire_handler, NULL);
293 }
294 
295 /**
296  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
297  *
298  * This function deinitializes the nbuf alloc fail replenish timer.
299  *
300  * Return: void
301  */
302 void __qdf_nbuf_deinit_replenish_timer(void)
303 {
304 	__qdf_nbuf_stop_replenish_timer();
305 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
306 }
307 #else
308 
309 static inline void __qdf_nbuf_start_replenish_timer(void) {}
310 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
311 #endif
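
/*
 * How the pair above is driven (see the allocator below): every failed
 * allocation increments alloc_fail_cnt and arms the 5 second timer; the
 * first successful allocation clears the count and disarms it. If the
 * timer fires with more than 50 accumulated failures,
 * qdf_replenish_expire_handler() reports the condition.
 */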
312 
313 /* globals do not need to be initialized to NULL/0 */
314 qdf_nbuf_trace_update_t qdf_trace_update_cb;
315 qdf_nbuf_free_t nbuf_free_cb;
316 
317 #ifdef QDF_NBUF_GLOBAL_COUNT
318 
319 /**
320  * __qdf_nbuf_count_get() - get nbuf global count
321  *
322  * Return: nbuf global count
323  */
324 int __qdf_nbuf_count_get(void)
325 {
326 	return qdf_atomic_read(&nbuf_count);
327 }
328 qdf_export_symbol(__qdf_nbuf_count_get);
329 
330 /**
331  * __qdf_nbuf_count_inc() - increment nbuf global count
332  *
333  * @nbuf: sk buff
334  *
335  * Return: void
336  */
337 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
338 {
339 	qdf_atomic_inc(&nbuf_count);
340 }
341 qdf_export_symbol(__qdf_nbuf_count_inc);
342 
343 /**
344  * __qdf_nbuf_count_dec() - decrement nbuf global count
345  *
346  * @nbuf: sk buff
347  *
348  * Return: void
349  */
350 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
351 {
352 	qdf_atomic_dec(&nbuf_count);
353 }
354 qdf_export_symbol(__qdf_nbuf_count_dec);
355 #endif
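
/*
 * Sketch of how the global count can be consumed for leak hunting
 * (assumes the public qdf_nbuf_count_get() wrapper over
 * __qdf_nbuf_count_get(); run_scenario() is a placeholder, and the
 * NBUF_DEBUGFS_NAME node presumably exposes the same value):
 *
 *	int before = qdf_nbuf_count_get();
 *
 *	run_scenario();
 *	if (qdf_nbuf_count_get() > before)
 *		qdf_err("possible nbuf leak: %d outstanding",
 *			qdf_nbuf_count_get() - before);
 */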
356 
357 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
358 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
359 				 int align, int prio, const char *func,
360 				 uint32_t line)
361 {
362 	struct sk_buff *skb;
363 	unsigned long offset;
364 	uint32_t lowmem_alloc_tries = 0;
365 
366 	if (align)
367 		size += (align - 1);
368 
369 realloc:
370 	skb = dev_alloc_skb(size);
371 
372 	if (skb)
373 		goto skb_alloc;
374 
375 	skb = pld_nbuf_pre_alloc(size);
376 
377 	if (!skb) {
378 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
379 				size, func, line);
380 		return NULL;
381 	}
382 
383 skb_alloc:
384 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
385 	 * Though we are trying to reserve low memory upfront to prevent this,
386 	 * we sometimes see SKBs allocated from low memory.
387 	 */
388 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
389 		lowmem_alloc_tries++;
390 		if (lowmem_alloc_tries > 100) {
391 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
392 				     size, func, line);
393 			return NULL;
394 		} else {
395 			/* Deliberately not freeing, so the retry
396 			 * cannot be handed the same low buffer again
397 			 */
398 			goto realloc;
399 		}
400 	}
401 	memset(skb->cb, 0x0, sizeof(skb->cb));
402 
403 	/*
404 	 * The default is for netbuf fragments to be interpreted
405 	 * as wordstreams rather than bytestreams.
406 	 */
407 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
408 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
409 
410 	/*
411 	 * XXX: how about we reserve first, then align?
412 	 * Align & make sure that the tail & data are adjusted properly
413 	 */
414 
415 	if (align) {
416 		offset = ((unsigned long)skb->data) % align;
417 		if (offset)
418 			skb_reserve(skb, align - offset);
419 	}
420 
421 	/*
422 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
423 	 * pointer
424 	 */
425 	skb_reserve(skb, reserve);
426 	qdf_nbuf_count_inc(skb);
427 
428 	return skb;
429 }
430 #else
431 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
432 				 int align, int prio, const char *func,
433 				 uint32_t line)
434 {
435 	struct sk_buff *skb;
436 	unsigned long offset;
437 	int flags = GFP_KERNEL;
438 
439 	if (align)
440 		size += (align - 1);
441 
442 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
443 		flags = GFP_ATOMIC;
444 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
445 		/*
446 		 * Observed that kcompactd burns CPU trying to assemble
447 		 * order-3 pages. __netdev_alloc_skb() already falls back to
448 		 * 4k pages when a high-order allocation fails, so there is
449 		 * no need to push that hard. Let kcompactd rest in peace.
450 		 */
451 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
452 #endif
453 	}
454 
455 	skb = __netdev_alloc_skb(NULL, size, flags);
456 
457 	if (skb)
458 		goto skb_alloc;
459 
460 	skb = pld_nbuf_pre_alloc(size);
461 
462 	if (!skb) {
463 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
464 				size, func, line);
465 		__qdf_nbuf_start_replenish_timer();
466 		return NULL;
467 	} else {
468 		__qdf_nbuf_stop_replenish_timer();
469 	}
470 
471 skb_alloc:
472 	memset(skb->cb, 0x0, sizeof(skb->cb));
473 
474 	/*
475 	 * The default is for netbuf fragments to be interpreted
476 	 * as wordstreams rather than bytestreams.
477 	 */
478 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
479 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
480 
481 	/*
482 	 * XXX: how about we reserve first, then align?
483 	 * Align & make sure that the tail & data are adjusted properly
484 	 */
485 
486 	if (align) {
487 		offset = ((unsigned long)skb->data) % align;
488 		if (offset)
489 			skb_reserve(skb, align - offset);
490 	}
491 
492 	/*
493 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
494 	 * pointer
495 	 */
496 	skb_reserve(skb, reserve);
497 	qdf_nbuf_count_inc(skb);
498 
499 	return skb;
500 }
501 #endif
502 qdf_export_symbol(__qdf_nbuf_alloc);
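
/*
 * Worked example of the alignment fixup above: with align = 4 and
 * skb->data ending in ...0x2, offset is 2, so skb_reserve(skb, 4 - 2)
 * shifts data to ...0x4. The (align - 1) bytes added to 'size' up
 * front guarantee the requested length still fits after that shift;
 * the 'reserve' headroom is taken only afterwards, which is why it can
 * re-unalign the data pointer (see the NOTE above).
 */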
503 
504 /**
505  * __qdf_nbuf_free() - free the nbuf (interrupt safe)
506  * @skb: Pointer to network buffer
507  *
508  * Return: none
509  */
510 
511 #ifdef CONFIG_MCL
512 void __qdf_nbuf_free(struct sk_buff *skb)
513 {
514 	if (pld_nbuf_pre_alloc_free(skb))
515 		return;
516 
517 	qdf_nbuf_count_dec(skb);
518 	if (nbuf_free_cb)
519 		nbuf_free_cb(skb);
520 	else
521 		dev_kfree_skb_any(skb);
522 }
523 #else
524 void __qdf_nbuf_free(struct sk_buff *skb)
525 {
526 	if (pld_nbuf_pre_alloc_free(skb))
527 		return;
528 
529 	qdf_nbuf_count_dec(skb);
530 	dev_kfree_skb_any(skb);
531 }
532 #endif
533 
534 qdf_export_symbol(__qdf_nbuf_free);
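
/*
 * Minimal allocate/use/free sketch against the public wrappers
 * (qdf_nbuf_alloc()/qdf_nbuf_free() resolve to the functions above with
 * caller info attached; "osdev" is assumed to be a valid qdf_device_t
 * and "payload"/"len" are placeholders):
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(osdev, 2048, 0, 4, false);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	qdf_mem_copy(qdf_nbuf_put_tail(nbuf, len), payload, len);
 *	...
 *	qdf_nbuf_free(nbuf);
 */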
535 
536 #ifdef NBUF_MEMORY_DEBUG
537 enum qdf_nbuf_event_type {
538 	QDF_NBUF_ALLOC,
539 	QDF_NBUF_ALLOC_CLONE,
540 	QDF_NBUF_ALLOC_COPY,
541 	QDF_NBUF_ALLOC_FAILURE,
542 	QDF_NBUF_FREE,
543 	QDF_NBUF_MAP,
544 	QDF_NBUF_UNMAP,
545 };
546 
547 struct qdf_nbuf_event {
548 	qdf_nbuf_t nbuf;
549 	char func[QDF_MEM_FUNC_NAME_SIZE];
550 	uint32_t line;
551 	enum qdf_nbuf_event_type type;
552 	uint64_t timestamp;
553 };
554 
555 #define QDF_NBUF_HISTORY_SIZE 4096
556 static qdf_atomic_t qdf_nbuf_history_index;
557 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
558 
559 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
560 {
561 	int32_t next = qdf_atomic_inc_return(index);
562 
563 	if (next == size)
564 		qdf_atomic_sub(size, index);
565 
566 	return next % size;
567 }
568 
569 static void
570 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
571 		     enum qdf_nbuf_event_type type)
572 {
573 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
574 						   QDF_NBUF_HISTORY_SIZE);
575 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
576 
577 	event->nbuf = nbuf;
578 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
579 	event->line = line;
580 	event->type = type;
581 	event->timestamp = qdf_get_log_timestamp();
582 }
583 #endif /* NBUF_MEMORY_DEBUG */
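
/*
 * Note on qdf_nbuf_circular_index_next() above: the atomic index grows
 * monotonically and is pulled back by 'size' once it hits the boundary,
 * so concurrent writers each claim a unique slot and the ring simply
 * wraps; with QDF_NBUF_HISTORY_SIZE == 4096 the returned sequence is
 * ..., 4094, 4095, 0, 1, ...
 */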
584 
585 #ifdef NBUF_MAP_UNMAP_DEBUG
586 struct qdf_nbuf_map_metadata {
587 	struct hlist_node node;
588 	qdf_nbuf_t nbuf;
589 	char func[QDF_MEM_FUNC_NAME_SIZE];
590 	uint32_t line;
591 };
592 
593 DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
594 			 sizeof(struct qdf_nbuf_map_metadata), 0);
595 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
596 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
597 static qdf_spinlock_t qdf_nbuf_map_lock;
598 
599 static void qdf_nbuf_map_tracking_init(void)
600 {
601 	qdf_flex_mem_init(&qdf_nbuf_map_pool);
602 	hash_init(qdf_nbuf_map_ht);
603 	qdf_spinlock_create(&qdf_nbuf_map_lock);
604 }
605 
606 static void qdf_nbuf_map_leaks_print(void)
607 {
608 	struct qdf_nbuf_map_metadata *meta;
609 	int bucket;
610 	uint32_t count = 0;
611 
612 	QDF_BUG(qdf_spin_is_locked(&qdf_nbuf_map_lock));
613 
614 	qdf_nofl_alert("Nbuf map-no-unmap events detected!");
615 	qdf_nofl_alert("-----------------------------------------------------");
616 
617 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
618 		count++;
619 		qdf_nofl_alert("0x%zx @ %s:%u",
620 			       (uintptr_t)meta->nbuf, meta->func, meta->line);
621 	}
622 
623 	QDF_DEBUG_PANIC("%u fatal nbuf map-no-unmap events detected!", count);
624 }
625 
626 void qdf_nbuf_map_check_for_leaks(void)
627 {
628 	qdf_flex_mem_release(&qdf_nbuf_map_pool);
629 
630 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
631 	if (!hash_empty(qdf_nbuf_map_ht))
632 		qdf_nbuf_map_leaks_print();
633 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
634 }
635 
636 static void qdf_nbuf_map_tracking_deinit(void)
637 {
638 	qdf_nbuf_map_check_for_leaks();
639 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
640 	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
641 }
642 
643 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
644 {
645 	struct qdf_nbuf_map_metadata *meta;
646 
647 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
648 		if (meta->nbuf == nbuf)
649 			return meta;
650 	}
651 
652 	return NULL;
653 }
654 
655 static QDF_STATUS
656 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
657 {
658 	struct qdf_nbuf_map_metadata *meta;
659 
660 	QDF_BUG(nbuf);
661 	if (!nbuf) {
662 		qdf_err("Cannot map null nbuf");
663 		return QDF_STATUS_E_INVAL;
664 	}
665 
666 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
667 	meta = qdf_nbuf_meta_get(nbuf);
668 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
669 	if (meta)
670 		QDF_DEBUG_PANIC(
671 			"Double nbuf map detected @ %s:%u; last map from %s:%u",
672 			func, line, meta->func, meta->line);
673 
674 	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
675 	if (!meta) {
676 		qdf_err("Failed to allocate nbuf map tracking metadata");
677 		return QDF_STATUS_E_NOMEM;
678 	}
679 
680 	meta->nbuf = nbuf;
681 	qdf_str_lcopy(meta->func, func, QDF_MEM_FUNC_NAME_SIZE);
682 	meta->line = line;
683 
684 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
685 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
686 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
687 
688 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
689 
690 	return QDF_STATUS_SUCCESS;
691 }
692 
693 static void
694 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
695 {
696 	struct qdf_nbuf_map_metadata *meta;
697 
698 	QDF_BUG(nbuf);
699 	if (!nbuf) {
700 		qdf_err("Cannot unmap null nbuf");
701 		return;
702 	}
703 
704 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
705 	meta = qdf_nbuf_meta_get(nbuf);
706 
707 	if (!meta)
708 		QDF_DEBUG_PANIC(
709 		      "Double nbuf unmap or unmap without map detected @ %s:%u",
710 		      func, line);
711 
712 	hash_del(&meta->node);
713 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
714 
715 	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
716 
717 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
718 }
719 
720 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
721 			      qdf_nbuf_t buf,
722 			      qdf_dma_dir_t dir,
723 			      const char *func,
724 			      uint32_t line)
725 {
726 	QDF_STATUS status;
727 
728 	status = qdf_nbuf_track_map(buf, func, line);
729 	if (QDF_IS_STATUS_ERROR(status))
730 		return status;
731 
732 	status = __qdf_nbuf_map(osdev, buf, dir);
733 	if (QDF_IS_STATUS_ERROR(status))
734 		qdf_nbuf_untrack_map(buf, func, line);
735 
736 	return status;
737 }
738 
739 qdf_export_symbol(qdf_nbuf_map_debug);
740 
741 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
742 			  qdf_nbuf_t buf,
743 			  qdf_dma_dir_t dir,
744 			  const char *func,
745 			  uint32_t line)
746 {
747 	qdf_nbuf_untrack_map(buf, func, line);
748 	__qdf_nbuf_unmap_single(osdev, buf, dir);
749 }
750 
751 qdf_export_symbol(qdf_nbuf_unmap_debug);
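
/*
 * With NBUF_MAP_UNMAP_DEBUG enabled, each map must be paired with
 * exactly one unmap or the tracker panics (double map, double unmap and
 * free-while-mapped are all fatal). A balanced sketch, assuming a valid
 * osdev:
 *
 *	if (QDF_IS_STATUS_ERROR(qdf_nbuf_map(osdev, nbuf,
 *					     QDF_DMA_TO_DEVICE)))
 *		return QDF_STATUS_E_FAILURE;
 *	...hand QDF_NBUF_CB_PADDR(nbuf) to the hardware...
 *	qdf_nbuf_unmap(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */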
752 
753 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
754 				     qdf_nbuf_t buf,
755 				     qdf_dma_dir_t dir,
756 				     const char *func,
757 				     uint32_t line)
758 {
759 	QDF_STATUS status;
760 
761 	status = qdf_nbuf_track_map(buf, func, line);
762 	if (QDF_IS_STATUS_ERROR(status))
763 		return status;
764 
765 	status = __qdf_nbuf_map_single(osdev, buf, dir);
766 	if (QDF_IS_STATUS_ERROR(status))
767 		qdf_nbuf_untrack_map(buf, func, line);
768 
769 	return status;
770 }
771 
772 qdf_export_symbol(qdf_nbuf_map_single_debug);
773 
774 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
775 				 qdf_nbuf_t buf,
776 				 qdf_dma_dir_t dir,
777 				 const char *func,
778 				 uint32_t line)
779 {
780 	qdf_nbuf_untrack_map(buf, func, line);
781 	__qdf_nbuf_unmap_single(osdev, buf, dir);
782 }
783 
784 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
785 
786 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
787 				     qdf_nbuf_t buf,
788 				     qdf_dma_dir_t dir,
789 				     int nbytes,
790 				     const char *func,
791 				     uint32_t line)
792 {
793 	QDF_STATUS status;
794 
795 	status = qdf_nbuf_track_map(buf, func, line);
796 	if (QDF_IS_STATUS_ERROR(status))
797 		return status;
798 
799 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
800 	if (QDF_IS_STATUS_ERROR(status))
801 		qdf_nbuf_untrack_map(buf, func, line);
802 
803 	return status;
804 }
805 
806 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
807 
808 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
809 				 qdf_nbuf_t buf,
810 				 qdf_dma_dir_t dir,
811 				 int nbytes,
812 				 const char *func,
813 				 uint32_t line)
814 {
815 	qdf_nbuf_untrack_map(buf, func, line);
816 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
817 }
818 
819 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
820 
821 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
822 					    qdf_nbuf_t buf,
823 					    qdf_dma_dir_t dir,
824 					    int nbytes,
825 					    const char *func,
826 					    uint32_t line)
827 {
828 	QDF_STATUS status;
829 
830 	status = qdf_nbuf_track_map(buf, func, line);
831 	if (QDF_IS_STATUS_ERROR(status))
832 		return status;
833 
834 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
835 	if (QDF_IS_STATUS_ERROR(status))
836 		qdf_nbuf_untrack_map(buf, func, line);
837 
838 	return status;
839 }
840 
841 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
842 
843 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
844 					qdf_nbuf_t buf,
845 					qdf_dma_dir_t dir,
846 					int nbytes,
847 					const char *func,
848 					uint32_t line)
849 {
850 	qdf_nbuf_untrack_map(buf, func, line);
851 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
852 }
853 
854 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
855 
856 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf, const char *func,
857 					     uint32_t line)
858 {
859 	struct qdf_nbuf_map_metadata *meta;
860 
861 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
862 	meta = qdf_nbuf_meta_get(nbuf);
863 	if (meta)
864 		QDF_DEBUG_PANIC(
865 			"Nbuf freed @ %s:%u while mapped from %s:%u",
866 			kbasename(func), line, meta->func, meta->line);
867 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
868 }
869 #else
870 static inline void qdf_nbuf_map_tracking_init(void)
871 {
872 }
873 
874 static inline void qdf_nbuf_map_tracking_deinit(void)
875 {
876 }
877 
878 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
879 						    const char *func,
880 						    uint32_t line)
881 {
882 }
883 #endif /* NBUF_MAP_UNMAP_DEBUG */
884 
885 /**
886  * __qdf_nbuf_map() - map a buffer to local bus address space
887  * @osdev: OS device
889  * @skb: Pointer to network buffer
890  * @dir: Direction
891  *
892  * Return: QDF_STATUS
893  */
894 #ifdef QDF_OS_DEBUG
895 QDF_STATUS
896 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
897 {
898 	struct skb_shared_info *sh = skb_shinfo(skb);
899 
900 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
901 			|| (dir == QDF_DMA_FROM_DEVICE));
902 
903 	/*
904 	 * Assume there's only a single fragment.
905 	 * To support multiple fragments, it would be necessary to change
906 	 * qdf_nbuf_t to be a separate object that stores meta-info
907 	 * (including the bus address for each fragment) and a pointer
908 	 * to the underlying sk_buff.
909 	 */
910 	qdf_assert(sh->nr_frags == 0);
911 
912 	return __qdf_nbuf_map_single(osdev, skb, dir);
913 }
914 qdf_export_symbol(__qdf_nbuf_map);
915 
916 #else
917 QDF_STATUS
918 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
919 {
920 	return __qdf_nbuf_map_single(osdev, skb, dir);
921 }
922 qdf_export_symbol(__qdf_nbuf_map);
923 #endif
924 /**
925  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
926  * @osdev: OS device
927  * @skb: Pointer to network buffer
928  * @dir: dma direction
929  *
930  * Return: none
931  */
932 void
933 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
934 			qdf_dma_dir_t dir)
935 {
936 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
937 		   || (dir == QDF_DMA_FROM_DEVICE));
938 
939 	/*
940 	 * Assume there's a single fragment.
941 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
942 	 */
943 	__qdf_nbuf_unmap_single(osdev, skb, dir);
944 }
945 qdf_export_symbol(__qdf_nbuf_unmap);
946 
947 /**
948  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
949  * @osdev: OS device
950  * @skb: Pointer to network buffer
951  * @dir: Direction
952  *
953  * Return: QDF_STATUS
954  */
955 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
956 QDF_STATUS
957 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
958 {
959 	qdf_dma_addr_t paddr;
960 
961 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
962 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
963 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
964 	return QDF_STATUS_SUCCESS;
965 }
966 qdf_export_symbol(__qdf_nbuf_map_single);
967 #else
968 QDF_STATUS
969 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
970 {
971 	qdf_dma_addr_t paddr;
972 
973 	/* assume that the OS only provides a single fragment */
974 	QDF_NBUF_CB_PADDR(buf) = paddr =
975 		dma_map_single(osdev->dev, buf->data,
976 				skb_end_pointer(buf) - buf->data,
977 				__qdf_dma_dir_to_os(dir));
978 	return dma_mapping_error(osdev->dev, paddr)
979 		? QDF_STATUS_E_FAILURE
980 		: QDF_STATUS_SUCCESS;
981 }
982 qdf_export_symbol(__qdf_nbuf_map_single);
983 #endif
984 /**
985  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
986  * @osdev: OS device
987  * @skb: Pointer to network buffer
988  * @dir: Direction
989  *
990  * Return: none
991  */
992 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
993 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
994 				qdf_dma_dir_t dir)
995 {
996 }
997 #else
998 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
999 					qdf_dma_dir_t dir)
1000 {
1001 	if (QDF_NBUF_CB_PADDR(buf))
1002 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1003 			skb_end_pointer(buf) - buf->data,
1004 			__qdf_dma_dir_to_os(dir));
1005 }
1006 #endif
1007 qdf_export_symbol(__qdf_nbuf_unmap_single);
1008 
1009 /**
1010  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1011  * @skb: Pointer to network buffer
1012  * @cksum: Pointer to checksum value
1013  *
1014  * Return: QDF_STATUS
1015  */
1016 QDF_STATUS
1017 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1018 {
1019 	switch (cksum->l4_result) {
1020 	case QDF_NBUF_RX_CKSUM_NONE:
1021 		skb->ip_summed = CHECKSUM_NONE;
1022 		break;
1023 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1024 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1025 		break;
1026 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1027 		skb->ip_summed = CHECKSUM_PARTIAL;
1028 		skb->csum = cksum->val;
1029 		break;
1030 	default:
1031 		pr_err("Unknown checksum type\n");
1032 		qdf_assert(0);
1033 		return QDF_STATUS_E_NOSUPPORT;
1034 	}
1035 	return QDF_STATUS_SUCCESS;
1036 }
1037 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
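
/*
 * Sketch of an rx-completion caller (field and enum names as consumed
 * by the switch above; "hardware already verified TCP/UDP checksum" is
 * the common case):
 *
 *	qdf_nbuf_rx_cksum_t cksum = {
 *		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
 *	};
 *
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */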
1038 
1039 /**
1040  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1041  * @skb: Pointer to network buffer
1042  *
1043  * Return: TX checksum value
1044  */
1045 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1046 {
1047 	switch (skb->ip_summed) {
1048 	case CHECKSUM_NONE:
1049 		return QDF_NBUF_TX_CKSUM_NONE;
1050 	case CHECKSUM_PARTIAL:
1051 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1052 	case CHECKSUM_COMPLETE:
1053 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1054 	default:
1055 		return QDF_NBUF_TX_CKSUM_NONE;
1056 	}
1057 }
1058 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1059 
1060 /**
1061  * __qdf_nbuf_get_tid() - get tid
1062  * @skb: Pointer to network buffer
1063  *
1064  * Return: tid
1065  */
1066 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1067 {
1068 	return skb->priority;
1069 }
1070 qdf_export_symbol(__qdf_nbuf_get_tid);
1071 
1072 /**
1073  * __qdf_nbuf_set_tid() - set tid
1074  * @skb: Pointer to network buffer
1075  * @tid: TID value to set
1076  * Return: none
1077  */
1078 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1079 {
1080 	skb->priority = tid;
1081 }
1082 qdf_export_symbol(__qdf_nbuf_set_tid);
1083 
1084 /**
1085  * __qdf_nbuf_get_exemption_type() - get exemption type
1086  * @skb: Pointer to network buffer
1087  *
1088  * Return: exemption type
1089  */
1090 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1091 {
1092 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1093 }
1094 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1095 
1096 /**
1097  * __qdf_nbuf_reg_trace_cb() - register trace callback
1098  * @cb_func_ptr: Pointer to trace callback function
1099  *
1100  * Return: none
1101  */
1102 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1103 {
1104 	qdf_trace_update_cb = cb_func_ptr;
1105 }
1106 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1107 
1108 /**
1109  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1110  *              of DHCP packet.
1111  * @data: Pointer to DHCP packet data buffer
1112  *
1113  * This func. returns the subtype of DHCP packet.
1114  *
1115  * Return: subtype of the DHCP packet.
1116  */
1117 enum qdf_proto_subtype
1118 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1119 {
1120 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1121 
1122 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1123 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1124 					QDF_DHCP_OPTION53_LENGTH)) {
1125 
1126 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1127 		case QDF_DHCP_DISCOVER:
1128 			subtype = QDF_PROTO_DHCP_DISCOVER;
1129 			break;
1130 		case QDF_DHCP_REQUEST:
1131 			subtype = QDF_PROTO_DHCP_REQUEST;
1132 			break;
1133 		case QDF_DHCP_OFFER:
1134 			subtype = QDF_PROTO_DHCP_OFFER;
1135 			break;
1136 		case QDF_DHCP_ACK:
1137 			subtype = QDF_PROTO_DHCP_ACK;
1138 			break;
1139 		case QDF_DHCP_NAK:
1140 			subtype = QDF_PROTO_DHCP_NACK;
1141 			break;
1142 		case QDF_DHCP_RELEASE:
1143 			subtype = QDF_PROTO_DHCP_RELEASE;
1144 			break;
1145 		case QDF_DHCP_INFORM:
1146 			subtype = QDF_PROTO_DHCP_INFORM;
1147 			break;
1148 		case QDF_DHCP_DECLINE:
1149 			subtype = QDF_PROTO_DHCP_DECLINE;
1150 			break;
1151 		default:
1152 			break;
1153 		}
1154 	}
1155 
1156 	return subtype;
1157 }
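
/*
 * Typical caller pattern for the classifiers in this block (a sketch):
 * confirm the frame really is DHCP before asking for the subtype, since
 * the subtype parser assumes option 53 sits at a fixed offset:
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data) &&
 *	    __qdf_nbuf_data_get_dhcp_subtype(data) == QDF_PROTO_DHCP_DISCOVER)
 *		...log the DISCOVER...
 */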
1158 
1159 /**
1160  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1161  *            of EAPOL packet.
1162  * @data: Pointer to EAPOL packet data buffer
1163  *
1164  * This func. returns the subtype of EAPOL packet.
1165  *
1166  * Return: subtype of the EAPOL packet.
1167  */
1168 enum qdf_proto_subtype
1169 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1170 {
1171 	uint16_t eapol_key_info;
1172 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1173 	uint16_t mask;
1174 
1175 	eapol_key_info = (uint16_t)(*(uint16_t *)
1176 			(data + EAPOL_KEY_INFO_OFFSET));
1177 
1178 	mask = eapol_key_info & EAPOL_MASK;
1179 	switch (mask) {
1180 	case EAPOL_M1_BIT_MASK:
1181 		subtype = QDF_PROTO_EAPOL_M1;
1182 		break;
1183 	case EAPOL_M2_BIT_MASK:
1184 		subtype = QDF_PROTO_EAPOL_M2;
1185 		break;
1186 	case EAPOL_M3_BIT_MASK:
1187 		subtype = QDF_PROTO_EAPOL_M3;
1188 		break;
1189 	case EAPOL_M4_BIT_MASK:
1190 		subtype = QDF_PROTO_EAPOL_M4;
1191 		break;
1192 	default:
1193 		break;
1194 	}
1195 
1196 	return subtype;
1197 }
1198 
1199 /**
1200  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1201  *            of ARP packet.
1202  * @data: Pointer to ARP packet data buffer
1203  *
1204  * This func. returns the subtype of ARP packet.
1205  *
1206  * Return: subtype of the ARP packet.
1207  */
1208 enum qdf_proto_subtype
1209 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1210 {
1211 	uint16_t subtype;
1212 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1213 
1214 	subtype = (uint16_t)(*(uint16_t *)
1215 			(data + ARP_SUB_TYPE_OFFSET));
1216 
1217 	switch (QDF_SWAP_U16(subtype)) {
1218 	case ARP_REQUEST:
1219 		proto_subtype = QDF_PROTO_ARP_REQ;
1220 		break;
1221 	case ARP_RESPONSE:
1222 		proto_subtype = QDF_PROTO_ARP_RES;
1223 		break;
1224 	default:
1225 		break;
1226 	}
1227 
1228 	return proto_subtype;
1229 }
1230 
1231 /**
1232  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1233  *            of IPV4 ICMP packet.
1234  * @data: Pointer to IPV4 ICMP packet data buffer
1235  *
1236  * This func. returns the subtype of ICMP packet.
1237  *
1238  * Return: subtype of the ICMP packet.
1239  */
1240 enum qdf_proto_subtype
1241 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1242 {
1243 	uint8_t subtype;
1244 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1245 
1246 	subtype = (uint8_t)(*(uint8_t *)
1247 			(data + ICMP_SUBTYPE_OFFSET));
1248 
1249 	switch (subtype) {
1250 	case ICMP_REQUEST:
1251 		proto_subtype = QDF_PROTO_ICMP_REQ;
1252 		break;
1253 	case ICMP_RESPONSE:
1254 		proto_subtype = QDF_PROTO_ICMP_RES;
1255 		break;
1256 	default:
1257 		break;
1258 	}
1259 
1260 	return proto_subtype;
1261 }
1262 
1263 /**
1264  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1265  *            of IPV6 ICMPV6 packet.
1266  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1267  *
1268  * This func. returns the subtype of ICMPV6 packet.
1269  *
1270  * Return: subtype of the ICMPV6 packet.
1271  */
1272 enum qdf_proto_subtype
1273 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1274 {
1275 	uint8_t subtype;
1276 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1277 
1278 	subtype = (uint8_t)(*(uint8_t *)
1279 			(data + ICMPV6_SUBTYPE_OFFSET));
1280 
1281 	switch (subtype) {
1282 	case ICMPV6_REQUEST:
1283 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1284 		break;
1285 	case ICMPV6_RESPONSE:
1286 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1287 		break;
1288 	case ICMPV6_RS:
1289 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1290 		break;
1291 	case ICMPV6_RA:
1292 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1293 		break;
1294 	case ICMPV6_NS:
1295 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1296 		break;
1297 	case ICMPV6_NA:
1298 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1299 		break;
1300 	default:
1301 		break;
1302 	}
1303 
1304 	return proto_subtype;
1305 }
1306 
1307 /**
1308  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1309  *            of IPV4 packet.
1310  * @data: Pointer to IPV4 packet data buffer
1311  *
1312  * This func. returns the proto type of IPV4 packet.
1313  *
1314  * Return: proto type of IPV4 packet.
1315  */
1316 uint8_t
1317 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1318 {
1319 	uint8_t proto_type;
1320 
1321 	proto_type = (uint8_t)(*(uint8_t *)(data +
1322 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1323 	return proto_type;
1324 }
1325 
1326 /**
1327  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1328  *            of IPV6 packet.
1329  * @data: Pointer to IPV6 packet data buffer
1330  *
1331  * This func. returns the proto type of IPV6 packet.
1332  *
1333  * Return: proto type of IPV6 packet.
1334  */
1335 uint8_t
1336 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1337 {
1338 	uint8_t proto_type;
1339 
1340 	proto_type = (uint8_t)(*(uint8_t *)(data +
1341 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1342 	return proto_type;
1343 }
1344 
1345 /**
1346  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1347  * @data: Pointer to network data
1348  *
1349  * This api is for Tx packets.
1350  *
1351  * Return: true if packet is ipv4 packet
1352  *	   false otherwise
1353  */
1354 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1355 {
1356 	uint16_t ether_type;
1357 
1358 	ether_type = (uint16_t)(*(uint16_t *)(data +
1359 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1360 
1361 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1362 		return true;
1363 	else
1364 		return false;
1365 }
1366 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
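
/*
 * Note on the QDF_SWAP_U16() pattern used by these checks: the
 * ethertype is read directly from the frame, where it is stored
 * big-endian, so the host-order constant is byte-swapped before the
 * compare instead of swapping the packet field. E.g. IPv4's 0x0800 is
 * read as 0x0008 on a little-endian host, which matches
 * QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE).
 */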
1367 
1368 /**
1369  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1370  * @data: Pointer to network data buffer
1371  *
1372  * This api is for ipv4 packet.
1373  *
1374  * Return: true if packet is DHCP packet
1375  *	   false otherwise
1376  */
1377 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1378 {
1379 	uint16_t sport;
1380 	uint16_t dport;
1381 
1382 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1383 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1384 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1385 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1386 					 sizeof(uint16_t)));
1387 
1388 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1389 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1390 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1391 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1392 		return true;
1393 	else
1394 		return false;
1395 }
1396 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1397 
1398 /**
1399  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1400  * @data: Pointer to network data buffer
1401  *
1402  * This api is for ipv4 packet.
1403  *
1404  * Return: true if packet is EAPOL packet
1405  *	   false otherwise.
1406  */
1407 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1408 {
1409 	uint16_t ether_type;
1410 
1411 	ether_type = (uint16_t)(*(uint16_t *)(data +
1412 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1413 
1414 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1415 		return true;
1416 	else
1417 		return false;
1418 }
1419 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1420 
1421 /**
1422  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1423  * @skb: Pointer to network buffer
1424  *
1425  * This api is for ipv4 packet.
1426  *
1427  * Return: true if packet is WAPI packet
1428  *	   false otherwise.
1429  */
1430 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1431 {
1432 	uint16_t ether_type;
1433 
1434 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1435 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1436 
1437 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1438 		return true;
1439 	else
1440 		return false;
1441 }
1442 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1443 
1444 /**
1445  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1446  * @skb: Pointer to network buffer
1447  *
1448  * This api is for ipv4 packet.
1449  *
1450  * Return: true if packet is tdls packet
1451  *	   false otherwise.
1452  */
1453 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1454 {
1455 	uint16_t ether_type;
1456 
1457 	ether_type = *(uint16_t *)(skb->data +
1458 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1459 
1460 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1461 		return true;
1462 	else
1463 		return false;
1464 }
1465 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1466 
1467 /**
1468  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1469  * @data: Pointer to network data buffer
1470  *
1471  * This api is for ipv4 packet.
1472  *
1473  * Return: true if packet is ARP packet
1474  *	   false otherwise.
1475  */
1476 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1477 {
1478 	uint16_t ether_type;
1479 
1480 	ether_type = (uint16_t)(*(uint16_t *)(data +
1481 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1482 
1483 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1484 		return true;
1485 	else
1486 		return false;
1487 }
1488 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1489 
1490 /**
1491  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1492  * @data: Pointer to network data buffer
1493  *
1494  * This api is for ipv4 packet.
1495  *
1496  * Return: true if packet is ARP request
1497  *	   false otherwise.
1498  */
1499 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1500 {
1501 	uint16_t op_code;
1502 
1503 	op_code = (uint16_t)(*(uint16_t *)(data +
1504 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1505 
1506 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1507 		return true;
1508 	return false;
1509 }
1510 
1511 /**
1512  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1513  * @data: Pointer to network data buffer
1514  *
1515  * This api is for ipv4 packet.
1516  *
1517  * Return: true if packet is ARP response
1518  *	   false otherwise.
1519  */
1520 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1521 {
1522 	uint16_t op_code;
1523 
1524 	op_code = (uint16_t)(*(uint16_t *)(data +
1525 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1526 
1527 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1528 		return true;
1529 	return false;
1530 }
1531 
1532 /**
1533  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1534  * @data: Pointer to network data buffer
1535  *
1536  * This api is for ipv4 packet.
1537  *
1538  * Return: ARP packet source IP value.
1539  */
1540 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1541 {
1542 	uint32_t src_ip;
1543 
1544 	src_ip = (uint32_t)(*(uint32_t *)(data +
1545 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1546 
1547 	return src_ip;
1548 }
1549 
1550 /**
1551  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1552  * @data: Pointer to network data buffer
1553  *
1554  * This api is for ipv4 packet.
1555  *
1556  * Return: ARP packet target IP value.
1557  */
1558 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1559 {
1560 	uint32_t tgt_ip;
1561 
1562 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1563 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1564 
1565 	return tgt_ip;
1566 }
1567 
1568 /**
1569  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1570  * @data: Pointer to network data buffer
1571  * @len: length to copy
1572  *
1573  * This api is for dns domain name
1574  *
1575  * Return: dns domain name.
1576  */
1577 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1578 {
1579 	uint8_t *domain_name;
1580 
1581 	domain_name = (uint8_t *)
1582 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1583 	return domain_name;
1584 }
1585 
1586 
1587 /**
1588  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1589  * @data: Pointer to network data buffer
1590  *
1591  * This api is for dns query packet.
1592  *
1593  * Return: true if packet is dns query packet.
1594  *	   false otherwise.
1595  */
1596 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1597 {
1598 	uint16_t op_code;
1599 	uint16_t tgt_port;
1600 
1601 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1602 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1603 	/* A standard DNS query always happens on dest port 53. */
1604 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1605 		op_code = (uint16_t)(*(uint16_t *)(data +
1606 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1607 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1608 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1609 			return true;
1610 	}
1611 	return false;
1612 }
1613 
1614 /**
1615  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1616  * @data: Pointer to network data buffer
1617  *
1618  * This api is for dns query response.
1619  *
1620  * Return: true if packet is dns response packet.
1621  *	   false otherwise.
1622  */
1623 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1624 {
1625 	uint16_t op_code;
1626 	uint16_t src_port;
1627 
1628 	src_port = (uint16_t)(*(uint16_t *)(data +
1629 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1630 	/* A standard DNS response always comes from src port 53. */
1631 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1632 		op_code = (uint16_t)(*(uint16_t *)(data +
1633 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1634 
1635 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1636 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1637 			return true;
1638 	}
1639 	return false;
1640 }
1641 
1642 /**
1643  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1644  * @data: Pointer to network data buffer
1645  *
1646  * This api is for tcp syn packet.
1647  *
1648  * Return: true if packet is tcp syn packet.
1649  *	   false otherwise.
1650  */
1651 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1652 {
1653 	uint8_t op_code;
1654 
1655 	op_code = (uint8_t)(*(uint8_t *)(data +
1656 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1657 
1658 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1659 		return true;
1660 	return false;
1661 }
1662 
1663 /**
1664  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1665  * @data: Pointer to network data buffer
1666  *
1667  * This api is for tcp syn ack packet.
1668  *
1669  * Return: true if packet is tcp syn ack packet.
1670  *	   false otherwise.
1671  */
1672 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1673 {
1674 	uint8_t op_code;
1675 
1676 	op_code = (uint8_t)(*(uint8_t *)(data +
1677 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1678 
1679 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1680 		return true;
1681 	return false;
1682 }
1683 
1684 /**
1685  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1686  * @data: Pointer to network data buffer
1687  *
1688  * This api is for tcp ack packet.
1689  *
1690  * Return: true if packet is tcp ack packet.
1691  *	   false otherwise.
1692  */
1693 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1694 {
1695 	uint8_t op_code;
1696 
1697 	op_code = (uint8_t)(*(uint8_t *)(data +
1698 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1699 
1700 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1701 		return true;
1702 	return false;
1703 }
1704 
1705 /**
1706  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1707  * @data: Pointer to network data buffer
1708  *
1709  * This api is for tcp packet.
1710  *
1711  * Return: tcp source port value.
1712  */
1713 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1714 {
1715 	uint16_t src_port;
1716 
1717 	src_port = (uint16_t)(*(uint16_t *)(data +
1718 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1719 
1720 	return src_port;
1721 }
1722 
1723 /**
1724  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1725  * @data: Pointer to network data buffer
1726  *
1727  * This api is for tcp packet.
1728  *
1729  * Return: tcp destination port value.
1730  */
1731 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1732 {
1733 	uint16_t tgt_port;
1734 
1735 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1736 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1737 
1738 	return tgt_port;
1739 }
1740 
1741 /**
1742  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1743  * @data: Pointer to network data buffer
1744  *
1745  * This api is for icmpv4 request packets.
1746  *
1747  * Return: true if packet is icmpv4 request
1748  *	   false otherwise.
1749  */
1750 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1751 {
1752 	uint8_t op_code;
1753 
1754 	op_code = (uint8_t)(*(uint8_t *)(data +
1755 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1756 
1757 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1758 		return true;
1759 	return false;
1760 }
1761 
1762 /**
1763  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1764  * @data: Pointer to network data buffer
1765  *
1766  * This api is for icmpv4 response packets.
1767  *
1768  * Return: true if packet is icmpv4 response
1769  *	   false otherwise.
1770  */
1771 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1772 {
1773 	uint8_t op_code;
1774 
1775 	op_code = (uint8_t)(*(uint8_t *)(data +
1776 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1777 
1778 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1779 		return true;
1780 	return false;
1781 }
1782 
1783 /**
1784  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1785  * @data: Pointer to network data buffer
1786  *
1787  * This api is for ipv4 packet.
1788  *
1789  * Return: icmpv4 packet source IP value.
1790  */
1791 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1792 {
1793 	uint32_t src_ip;
1794 
1795 	src_ip = (uint32_t)(*(uint32_t *)(data +
1796 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1797 
1798 	return src_ip;
1799 }
1800 
1801 /**
1802  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1803  * @data: Pointer to network data buffer
1804  *
1805  * This api is for ipv4 packet.
1806  *
1807  * Return: icmpv4 packet target IP value.
1808  */
1809 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1810 {
1811 	uint32_t tgt_ip;
1812 
1813 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1814 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1815 
1816 	return tgt_ip;
1817 }
1818 
1819 
1820 /**
1821  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1822  * @data: Pointer to IPV6 packet data buffer
1823  *
1824  * This func. checks whether it is an IPV6 packet or not.
1825  *
1826  * Return: TRUE if it is an IPV6 packet
1827  *         FALSE if not
1828  */
1829 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1830 {
1831 	uint16_t ether_type;
1832 
1833 	ether_type = (uint16_t)(*(uint16_t *)(data +
1834 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1835 
1836 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1837 		return true;
1838 	else
1839 		return false;
1840 }
1841 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1842 
1843 /**
1844  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1845  * @data: Pointer to network data buffer
1846  *
1847  * This api is for ipv6 packet.
1848  *
1849  * Return: true if packet is DHCP packet
1850  *	   false otherwise
1851  */
1852 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1853 {
1854 	uint16_t sport;
1855 	uint16_t dport;
1856 
1857 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1858 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1859 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1860 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1861 					sizeof(uint16_t));
1862 
1863 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1864 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1865 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1866 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1867 		return true;
1868 	else
1869 		return false;
1870 }
1871 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1872 
1873 /**
1874  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
1875  * @data: Pointer to IPV4 packet data buffer
1876  *
1877  * This func. checks whether it is an IPV4 multicast packet or not.
1878  *
1879  * Return: TRUE if it is an IPV4 multicast packet
1880  *         FALSE if not
1881  */
1882 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1883 {
1884 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1885 		uint32_t *dst_addr =
1886 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1887 
1888 		/*
1889 		 * Check the top nibble of the destination IPv4 address;
1890 		 * 0xE (224.0.0.0/4) means a multicast address.
1891 		 */
1892 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1893 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1894 			return true;
1895 		else
1896 			return false;
1897 	} else
1898 		return false;
1899 }
1900 
1901 /**
1902  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
1903  * @data: Pointer to IPV6 packet data buffer
1904  *
1905  * This func. checks whether it is an IPV6 multicast packet or not.
1906  *
1907  * Return: TRUE if it is an IPV6 multicast packet
1908  *         FALSE if not
1909  */
1910 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1911 {
1912 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1913 		uint16_t *dst_addr;
1914 
1915 		dst_addr = (uint16_t *)
1916 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1917 
1918 		/*
1919 		 * Check the first 16 bits of the destination address;
1920 		 * 0xFF00 indicates an IPv6 multicast packet.
1921 		 */
1922 		if (*dst_addr ==
1923 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1924 			return true;
1925 		else
1926 			return false;
1927 	} else
1928 		return false;
1929 }
1930 
1931 /**
1932  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
1933  * @data: Pointer to IPV4 ICMP packet data buffer
1934  *
1935  * This func. checks whether it is an ICMP packet or not.
1936  *
1937  * Return: TRUE if it is an ICMP packet
1938  *         FALSE if not
1939  */
1940 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1941 {
1942 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1943 		uint8_t pkt_type;
1944 
1945 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1946 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1947 
1948 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1949 			return true;
1950 		else
1951 			return false;
1952 	} else
1953 		return false;
1954 }
1955 
1956 /**
1957  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
1958  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1959  *
1960  * This func. checks whether it is an ICMPV6 packet or not.
1961  *
1962  * Return: TRUE if it is an ICMPV6 packet
1963  *         FALSE if not
1964  */
1965 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1966 {
1967 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1968 		uint8_t pkt_type;
1969 
1970 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1971 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1972 
1973 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1974 			return true;
1975 		else
1976 			return false;
1977 	} else
1978 		return false;
1979 }
1980 
1981 /**
1982  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
1983  * @data: Pointer to IPV4 UDP packet data buffer
1984  *
1985  * This func. checks whether it is an IPV4 UDP packet or not.
1986  *
1987  * Return: TRUE if it is an IPV4 UDP packet
1988  *         FALSE if not
1989  */
1990 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1991 {
1992 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1993 		uint8_t pkt_type;
1994 
1995 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1996 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1997 
1998 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1999 			return true;
2000 		else
2001 			return false;
2002 	} else
2003 		return false;
2004 }
2005 
2006 /**
2007  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2008  * @data: Pointer to IPV4 TCP packet data buffer
2009  *
2010  * This function checks whether the packet is an IPV4 TCP packet.
2011  *
2012  * Return: TRUE if it is an IPV4 TCP packet
2013  *         FALSE if not
2014  */
2015 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2016 {
2017 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2018 		uint8_t pkt_type;
2019 
2020 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2021 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2022 
2023 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2024 			return true;
2025 		else
2026 			return false;
2027 	} else
2028 		return false;
2029 }
2030 
2031 /**
2032  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2033  * @data: Pointer to IPV6 UDP packet data buffer
2034  *
2035  * This function checks whether the packet is an IPV6 UDP packet.
2036  *
2037  * Return: TRUE if it is an IPV6 UDP packet
2038  *         FALSE if not
2039  */
2040 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2041 {
2042 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2043 		uint8_t pkt_type;
2044 
2045 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2046 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2047 
2048 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2049 			return true;
2050 		else
2051 			return false;
2052 	} else
2053 		return false;
2054 }
2055 
2056 /**
2057  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2058  * @data: Pointer to IPV6 TCP packet data buffer
2059  *
2060  * This function checks whether the packet is an IPV6 TCP packet.
2061  *
2062  * Return: TRUE if it is an IPV6 TCP packet
2063  *         FALSE if not
2064  */
2065 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2066 {
2067 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2068 		uint8_t pkt_type;
2069 
2070 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2071 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2072 
2073 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2074 			return true;
2075 		else
2076 			return false;
2077 	} else
2078 		return false;
2079 }
2080 
2081 /**
2082  * __qdf_nbuf_is_bcast_pkt() - check if destination address is broadcast
2083  * @nbuf: sk buff
2084  *
2085  * Return: true if packet is broadcast
2086  *	   false otherwise
2087  */
2088 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2089 {
2090 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2091 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2092 }
2093 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
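/*
 * Example (illustrative): an ethernet destination of ff:ff:ff:ff:ff:ff,
 * as carried by an ARP request, makes __qdf_nbuf_is_bcast_pkt() return
 * true; multicast destinations do not:
 *
 *	if (__qdf_nbuf_is_bcast_pkt(nbuf))
 *		tx_stats->bcast++;	(tx_stats is hypothetical)
 */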
2094 
2095 #ifdef NBUF_MEMORY_DEBUG
2096 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2097 
2098 /**
2099  * struct qdf_nbuf_track_t - Network buffer track structure
2100  *
2101  * @p_next: Pointer to next
2102  * @net_buf: Pointer to network buffer
2103  * @func_name: Function name
2104  * @line_num: Line number
2105  * @size: Size
2106  */
2107 struct qdf_nbuf_track_t {
2108 	struct qdf_nbuf_track_t *p_next;
2109 	qdf_nbuf_t net_buf;
2110 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2111 	uint32_t line_num;
2112 	size_t size;
2113 };
2114 
2115 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2116 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2117 
2118 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2119 static struct kmem_cache *nbuf_tracking_cache;
2120 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2121 static spinlock_t qdf_net_buf_track_free_list_lock;
2122 static uint32_t qdf_net_buf_track_free_list_count;
2123 static uint32_t qdf_net_buf_track_used_list_count;
2124 static uint32_t qdf_net_buf_track_max_used;
2125 static uint32_t qdf_net_buf_track_max_free;
2126 static uint32_t qdf_net_buf_track_max_allocated;
2127 
2128 /**
2129  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2130  *
2131  * Tracks the max number of network buffers that the wlan driver was tracking
2132  * at any one time, as well as the max total number of cookies ever allocated.
2133  *
2134  * Return: none
2135  */
2136 static inline void update_max_used(void)
2137 {
2138 	int sum;
2139 
2140 	if (qdf_net_buf_track_max_used <
2141 	    qdf_net_buf_track_used_list_count)
2142 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2143 	sum = qdf_net_buf_track_free_list_count +
2144 		qdf_net_buf_track_used_list_count;
2145 	if (qdf_net_buf_track_max_allocated < sum)
2146 		qdf_net_buf_track_max_allocated = sum;
2147 }
2148 
2149 /**
2150  * update_max_free() - update qdf_net_buf_track_max_free
2151  *
2152  * Tracks the max number of tracking buffers kept in the freelist.
2153  *
2154  * Return: none
2155  */
2156 static inline void update_max_free(void)
2157 {
2158 	if (qdf_net_buf_track_max_free <
2159 	    qdf_net_buf_track_free_list_count)
2160 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2161 }
2162 
2163 /**
2164  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2165  *
2166  * This function pulls from the freelist when possible and otherwise falls
2167  * back to kmem_cache_alloc(). It also adds flexibility to adjust the
2168  * allocation and freelist schemes.
2169  *
2170  * Return: a pointer to an unused QDF_NBUF_TRACK structure; may not be zeroed
2171  */
2172 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2173 {
2174 	int flags = GFP_KERNEL;
2175 	unsigned long irq_flag;
2176 	QDF_NBUF_TRACK *new_node = NULL;
2177 
2178 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2179 	qdf_net_buf_track_used_list_count++;
2180 	if (qdf_net_buf_track_free_list != NULL) {
2181 		new_node = qdf_net_buf_track_free_list;
2182 		qdf_net_buf_track_free_list =
2183 			qdf_net_buf_track_free_list->p_next;
2184 		qdf_net_buf_track_free_list_count--;
2185 	}
2186 	update_max_used();
2187 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2188 
2189 	if (new_node != NULL)
2190 		return new_node;
2191 
2192 	if (in_interrupt() || irqs_disabled() || in_atomic())
2193 		flags = GFP_ATOMIC;
2194 
2195 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2196 }
2197 
2198 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2199 #define FREEQ_POOLSIZE 2048
2200 
2201 /**
2202  * qdf_nbuf_track_free() - free the nbuf tracking cookie
2203  * @node: nbuf tracking cookie to free
2204  *
2205  * Matches calls to qdf_nbuf_track_alloc(). Either frees the tracking
2206  * cookie to the kernel or to an internal freelist, based on freelist size.
2207  *
2208  * Return: none
2209  */
2210 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2211 {
2212 	unsigned long irq_flag;
2213 
2214 	if (!node)
2215 		return;
2216 
2217 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
2218 	 * only shrink the freelist if it is bigger than twice the number of
2219 	 * nbufs in use. If the driver is stalling in a consistently bursty
2220 	 * fashion, this will keep 3/4 of the allocations on the freelist
2221 	 * while also allowing the system to recover memory as less frantic
2222 	 * traffic occurs.
2223 	 */
2224 
2225 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2226 
2227 	qdf_net_buf_track_used_list_count--;
2228 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2229 	   (qdf_net_buf_track_free_list_count >
2230 	    qdf_net_buf_track_used_list_count << 1)) {
2231 		kmem_cache_free(nbuf_tracking_cache, node);
2232 	} else {
2233 		node->p_next = qdf_net_buf_track_free_list;
2234 		qdf_net_buf_track_free_list = node;
2235 		qdf_net_buf_track_free_list_count++;
2236 	}
2237 	update_max_free();
2238 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2239 }
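/*
 * Worked example of the shrink policy above (illustrative): with
 * FREEQ_POOLSIZE == 2048, a cookie is handed back to kmem_cache_free()
 * only when the freelist holds more than 2048 entries AND more than
 * twice the cookies still in use. With 3000 free / 2000 used the node
 * is kept (3000 <= 4000); with 3000 free / 1000 used it is returned to
 * the kernel (3000 > 2048 and 3000 > 2000).
 */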
2240 
2241 /**
2242  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2243  *
2244  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2245  * the freelist first makes it performant for the first iperf udp burst
2246  * as well as steady state.
2247  *
2248  * Return: None
2249  */
2250 static void qdf_nbuf_track_prefill(void)
2251 {
2252 	int i;
2253 	QDF_NBUF_TRACK *node, *head;
2254 
2255 	/* prepopulate the freelist */
2256 	head = NULL;
2257 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2258 		node = qdf_nbuf_track_alloc();
2259 		if (node == NULL)
2260 			continue;
2261 		node->p_next = head;
2262 		head = node;
2263 	}
2264 	while (head) {
2265 		node = head->p_next;
2266 		qdf_nbuf_track_free(head);
2267 		head = node;
2268 	}
2269 
2270 	/* prefilled buffers should not count as used */
2271 	qdf_net_buf_track_max_used = 0;
2272 }
2273 
2274 /**
2275  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2276  *
2277  * This initializes the memory manager for the nbuf tracking cookies.  Because
2278  * these cookies are all the same size and only used in this feature, we can
2279  * use a kmem_cache to provide tracking as well as to speed up allocations.
2280  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2281  * features) a freelist is prepopulated here.
2282  *
2283  * Return: None
2284  */
2285 static void qdf_nbuf_track_memory_manager_create(void)
2286 {
2287 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2288 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2289 						sizeof(QDF_NBUF_TRACK),
2290 						0, 0, NULL);
2291 
2292 	qdf_nbuf_track_prefill();
2293 }
2294 
2295 /**
2296  * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking
2297  * cookie memory manager
2298  *
2299  * Empties the freelist and prints usage statistics when the manager is no
2300  * longer needed. The kmem_cache is also destroyed here so that it can warn
2301  * if any nbuf tracking cookies were leaked.
2301  *
2302  * Return: None
2303  */
2304 static void qdf_nbuf_track_memory_manager_destroy(void)
2305 {
2306 	QDF_NBUF_TRACK *node, *tmp;
2307 	unsigned long irq_flag;
2308 
2309 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2310 	node = qdf_net_buf_track_free_list;
2311 
2312 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2313 		qdf_print("%s: unexpectedly large max_used count %d",
2314 			  __func__, qdf_net_buf_track_max_used);
2315 
2316 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2317 		qdf_print("%s: %d unused trackers were allocated",
2318 			  __func__,
2319 			  qdf_net_buf_track_max_allocated -
2320 			  qdf_net_buf_track_max_used);
2321 
2322 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2323 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2324 		qdf_print("%s: check freelist shrinking functionality",
2325 			  __func__);
2326 
2327 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2328 		  "%s: %d residual freelist size",
2329 		  __func__, qdf_net_buf_track_free_list_count);
2330 
2331 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2332 		  "%s: %d max freelist size observed",
2333 		  __func__, qdf_net_buf_track_max_free);
2334 
2335 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2336 		  "%s: %d max buffers used observed",
2337 		  __func__, qdf_net_buf_track_max_used);
2338 
2339 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2340 		  "%s: %d max buffers allocated observed",
2341 		  __func__, qdf_net_buf_track_max_allocated);
2342 
2343 	while (node) {
2344 		tmp = node;
2345 		node = node->p_next;
2346 		kmem_cache_free(nbuf_tracking_cache, tmp);
2347 		qdf_net_buf_track_free_list_count--;
2348 	}
2349 
2350 	if (qdf_net_buf_track_free_list_count != 0)
2351 		qdf_info("%d unfreed tracking memory lost in freelist",
2352 			 qdf_net_buf_track_free_list_count);
2353 
2354 	if (qdf_net_buf_track_used_list_count != 0)
2355 		qdf_info("%d unfreed tracking memory still in use",
2356 			 qdf_net_buf_track_used_list_count);
2357 
2358 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2359 	kmem_cache_destroy(nbuf_tracking_cache);
2360 	qdf_net_buf_track_free_list = NULL;
2361 }
2362 
2363 /**
2364  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2365  *
2366  * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
2367  * driver in a hash table; when the driver is unloaded it reports leaked SKBs.
2368  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2369  * supposed to call qdf_net_buf_debug_release_skb() so that those SKBs are not
2370  * reported as memory leaks.
2371  *
2372  * Return: none
2373  */
2374 void qdf_net_buf_debug_init(void)
2375 {
2376 	uint32_t i;
2377 
2378 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2379 
2380 	qdf_nbuf_map_tracking_init();
2381 	qdf_nbuf_track_memory_manager_create();
2382 
2383 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2384 		gp_qdf_net_buf_track_tbl[i] = NULL;
2385 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2386 	}
2387 }
2388 qdf_export_symbol(qdf_net_buf_debug_init);
2389 
2390 /**
2391  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2392  *
2393  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2394  * As part of exiting the functionality, free the leaked memory and
2395  * clean up the tracking buffers.
2396  *
2397  * Return: none
2398  */
2399 void qdf_net_buf_debug_exit(void)
2400 {
2401 	uint32_t i;
2402 	uint32_t count = 0;
2403 	unsigned long irq_flag;
2404 	QDF_NBUF_TRACK *p_node;
2405 	QDF_NBUF_TRACK *p_prev;
2406 
2407 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2408 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2409 		p_node = gp_qdf_net_buf_track_tbl[i];
2410 		while (p_node) {
2411 			p_prev = p_node;
2412 			p_node = p_node->p_next;
2413 			count++;
2414 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2415 				 p_prev->func_name, p_prev->line_num,
2416 				 p_prev->size, p_prev->net_buf);
2417 			qdf_nbuf_track_free(p_prev);
2418 		}
2419 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2420 	}
2421 
2422 	qdf_nbuf_track_memory_manager_destroy();
2423 	qdf_nbuf_map_tracking_deinit();
2424 
2425 #ifdef CONFIG_HALT_KMEMLEAK
2426 	if (count) {
2427 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2428 		QDF_BUG(0);
2429 	}
2430 #endif
2431 }
2432 qdf_export_symbol(qdf_net_buf_debug_exit);
2433 
2434 /**
2435  * qdf_net_buf_debug_hash() - hash network buffer pointer
2436  * @net_buf: network buffer to hash
2437  *
2438  * Return: hash value
2438  */
2439 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2440 {
2441 	uint32_t i;
2442 
2443 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2444 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2445 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2446 
2447 	return i;
2448 }
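/*
 * Worked example (illustrative): the hash folds two right-shifted copies
 * of the pointer and masks the sum down to the table size, which must
 * therefore remain a power of two. For net_buf == (qdf_nbuf_t)0x1234560:
 *
 *	0x1234560 >> 4  == 0x123456	(drops allocator alignment bits)
 *	0x1234560 >> 14 == 0x48d	(mixes in higher-order bits)
 *	(0x123456 + 0x48d) & 0x3ff == 0xe3, i.e. bucket 227 of 1024
 */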
2449 
2450 /**
2451  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2452  * @net_buf: network buffer to look up
2453  *
2454  * Return: pointer to the tracking node if the skb is found in the hash
2455  *	table, else %NULL
2455  */
2456 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2457 {
2458 	uint32_t i;
2459 	QDF_NBUF_TRACK *p_node;
2460 
2461 	i = qdf_net_buf_debug_hash(net_buf);
2462 	p_node = gp_qdf_net_buf_track_tbl[i];
2463 
2464 	while (p_node) {
2465 		if (p_node->net_buf == net_buf)
2466 			return p_node;
2467 		p_node = p_node->p_next;
2468 	}
2469 
2470 	return NULL;
2471 }
2472 
2473 /**
2474  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2475  * @net_buf: network buffer to track
2476  * @size: allocation size recorded against the buffer
2477  * @func_name: name of the allocating function
2478  * @line_num: line number of the allocation
2479  *
2480  * Return: none
2477  */
2478 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2479 				const char *func_name, uint32_t line_num)
2480 {
2481 	uint32_t i;
2482 	unsigned long irq_flag;
2483 	QDF_NBUF_TRACK *p_node;
2484 	QDF_NBUF_TRACK *new_node;
2485 
2486 	new_node = qdf_nbuf_track_alloc();
2487 
2488 	i = qdf_net_buf_debug_hash(net_buf);
2489 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2490 
2491 	p_node = qdf_net_buf_debug_look_up(net_buf);
2492 
2493 	if (p_node) {
2494 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2495 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2496 			  net_buf, func_name, line_num);
2497 		qdf_nbuf_track_free(new_node);
2498 	} else {
2499 		p_node = new_node;
2500 		if (p_node) {
2501 			p_node->net_buf = net_buf;
2502 			qdf_str_lcopy(p_node->func_name, func_name,
2503 				      QDF_MEM_FUNC_NAME_SIZE);
2504 			p_node->line_num = line_num;
2505 			p_node->size = size;
2506 			qdf_mem_skb_inc(size);
2507 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2508 			gp_qdf_net_buf_track_tbl[i] = p_node;
2509 		} else
2510 			qdf_print(
2511 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2512 				  func_name, line_num, size);
2513 	}
2514 
2515 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2516 }
2517 qdf_export_symbol(qdf_net_buf_debug_add_node);
2518 
2519 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2520 				   uint32_t line_num)
2521 {
2522 	uint32_t i;
2523 	unsigned long irq_flag;
2524 	QDF_NBUF_TRACK *p_node;
2525 
2526 	i = qdf_net_buf_debug_hash(net_buf);
2527 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2528 
2529 	p_node = qdf_net_buf_debug_look_up(net_buf);
2530 
2531 	if (p_node) {
2532 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2533 			      QDF_MEM_FUNC_NAME_SIZE);
2534 		p_node->line_num = line_num;
2535 	}
2536 
2537 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2538 }
2539 
2540 qdf_export_symbol(qdf_net_buf_debug_update_node);
2541 
2542 /**
2543  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2544  * @net_buf: network buffer to stop tracking
2545  *
2546  * Return: none
2546  */
2547 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2548 {
2549 	uint32_t i;
2550 	QDF_NBUF_TRACK *p_head;
2551 	QDF_NBUF_TRACK *p_node = NULL;
2552 	unsigned long irq_flag;
2553 	QDF_NBUF_TRACK *p_prev;
2554 
2555 	i = qdf_net_buf_debug_hash(net_buf);
2556 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2557 
2558 	p_head = gp_qdf_net_buf_track_tbl[i];
2559 
2560 	/* Unallocated SKB */
2561 	if (!p_head)
2562 		goto done;
2563 
2564 	p_node = p_head;
2565 	/* Found at head of the table */
2566 	if (p_head->net_buf == net_buf) {
2567 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2568 		goto done;
2569 	}
2570 
2571 	/* Search in collision list */
2572 	while (p_node) {
2573 		p_prev = p_node;
2574 		p_node = p_node->p_next;
2575 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2576 			p_prev->p_next = p_node->p_next;
2577 			break;
2578 		}
2579 	}
2580 
2581 done:
2582 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2583 
2584 	if (p_node) {
2585 		qdf_mem_skb_dec(p_node->size);
2586 		qdf_nbuf_track_free(p_node);
2587 	} else {
2588 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2589 			  net_buf);
2590 		QDF_BUG(0);
2591 	}
2592 }
2593 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2594 
2595 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2596 				   const char *func_name, uint32_t line_num)
2597 {
2598 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2599 
2600 	while (ext_list) {
2601 		/*
2602 		 * Also add each segment of a jumbo packet chained
2603 		 * via frag_list
2604 		 */
2605 		qdf_nbuf_t next;
2606 
2607 		next = qdf_nbuf_queue_next(ext_list);
2608 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2609 		ext_list = next;
2610 	}
2611 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2612 }
2613 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2614 
2615 /**
2616  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2617  * @net_buf: Network buf holding head segment (single)
2618  *
2619  * WLAN driver modules whose allocated SKBs are freed by the network stack
2620  * are supposed to call this API before returning the SKB to the network
2621  * stack, so that the SKB is not reported as a memory leak.
2622  *
2623  * Return: none
2624  */
2625 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2626 {
2627 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2628 
2629 	while (ext_list) {
2630 		/*
2631 		 * Also delete the tracking node for each segment of a
2632 		 * jumbo packet chained via frag_list
2633 		 */
2634 		qdf_nbuf_t next;
2635 
2636 		next = qdf_nbuf_queue_next(ext_list);
2637 
2638 		if (qdf_nbuf_is_tso(ext_list) &&
2639 			qdf_nbuf_get_users(ext_list) > 1) {
2640 			ext_list = next;
2641 			continue;
2642 		}
2643 
2644 		qdf_net_buf_debug_delete_node(ext_list);
2645 		ext_list = next;
2646 	}
2647 
2648 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2649 		return;
2650 
2651 	qdf_net_buf_debug_delete_node(net_buf);
2652 }
2653 qdf_export_symbol(qdf_net_buf_debug_release_skb);
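/*
 * Usage sketch (illustrative): an RX path that hands an skb up to the
 * network stack stops tracking it first so the stack's eventual free is
 * not flagged as a leak. netif_rx_ni() is just one possible delivery
 * call:
 *
 *	qdf_net_buf_debug_release_skb(nbuf);
 *	netif_rx_ni((struct sk_buff *)nbuf);
 */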
2654 
2655 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2656 				int reserve, int align, int prio,
2657 				const char *func, uint32_t line)
2658 {
2659 	qdf_nbuf_t nbuf;
2660 
2661 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2662 
2663 	/* Store SKB in internal QDF tracking table */
2664 	if (qdf_likely(nbuf)) {
2665 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2666 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2667 	} else {
2668 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2669 	}
2670 
2671 	return nbuf;
2672 }
2673 qdf_export_symbol(qdf_nbuf_alloc_debug);
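/*
 * Usage sketch (illustrative): with NBUF_MEMORY_DEBUG enabled, the public
 * allocation wrapper is expected to route here with the caller's location
 * so that leak reports name the real allocation site, e.g.:
 *
 *	nbuf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
 *				    __func__, __LINE__);
 */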
2674 
2675 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2676 {
2677 	if (qdf_unlikely(!nbuf))
2678 		return;
2679 
2680 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2681 		goto free_buf;
2682 
2683 	/* Remove SKB from internal QDF tracking table */
2684 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2685 	qdf_net_buf_debug_delete_node(nbuf);
2686 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2687 
2688 free_buf:
2689 	__qdf_nbuf_free(nbuf);
2690 }
2691 qdf_export_symbol(qdf_nbuf_free_debug);
2692 
2693 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2694 {
2695 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2696 
2697 	if (qdf_unlikely(!cloned_buf))
2698 		return NULL;
2699 
2700 	/* Store SKB in internal QDF tracking table */
2701 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2702 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2703 
2704 	return cloned_buf;
2705 }
2706 qdf_export_symbol(qdf_nbuf_clone_debug);
2707 
2708 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2709 {
2710 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2711 
2712 	if (qdf_unlikely(!copied_buf))
2713 		return NULL;
2714 
2715 	/* Store SKB in internal QDF tracking table */
2716 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2717 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2718 
2719 	return copied_buf;
2720 }
2721 qdf_export_symbol(qdf_nbuf_copy_debug);
2722 
2723 #endif /* NBUF_MEMORY_DEBUG */
2724 
2725 #if defined(FEATURE_TSO)
2726 
2727 /**
2728  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2729  *
2730  * @ethproto: ethernet type of the msdu
2731  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2732  * @l2_len: L2 length for the msdu
2733  * @eit_hdr: pointer to EIT header
2734  * @eit_hdr_len: EIT header length for the msdu
2735  * @eit_hdr_dma_map_addr: dma addr for EIT header
2736  * @tcphdr: pointer to tcp header
2737  * @ipv4_csum_en: ipv4 checksum enable
2738  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2739  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2740  * @ip_id: IP id
2741  * @tcp_seq_num: TCP sequence number
2742  *
2743  * This structure holds the TSO common info that is common
2744  * across all the TCP segments of the jumbo packet.
2745  */
2746 struct qdf_tso_cmn_seg_info_t {
2747 	uint16_t ethproto;
2748 	uint16_t ip_tcp_hdr_len;
2749 	uint16_t l2_len;
2750 	uint8_t *eit_hdr;
2751 	uint32_t eit_hdr_len;
2752 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2753 	struct tcphdr *tcphdr;
2754 	uint16_t ipv4_csum_en;
2755 	uint16_t tcp_ipv4_csum_en;
2756 	uint16_t tcp_ipv6_csum_en;
2757 	uint16_t ip_id;
2758 	uint32_t tcp_seq_num;
2759 };
2760 
2761 /**
2762  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2763  * information
2764  * @osdev: qdf device handle
2765  * @skb: skb buffer
2766  * @tso_info: Parameters common to all segments
2767  *
2768  * Get the TSO information that is common across all the TCP
2769  * segments of the jumbo packet
2770  *
2771  * Return: 0 on success, 1 on failure
2772  */
2773 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2774 			struct sk_buff *skb,
2775 			struct qdf_tso_cmn_seg_info_t *tso_info)
2776 {
2777 	/* Get ethernet type and ethernet header length */
2778 	tso_info->ethproto = vlan_get_protocol(skb);
2779 
2780 	/* Determine whether this is an IPv4 or IPv6 packet */
2781 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2782 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2783 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2784 
2785 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2786 		tso_info->ipv4_csum_en = 1;
2787 		tso_info->tcp_ipv4_csum_en = 1;
2788 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2789 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2790 				ipv4_hdr->protocol);
2791 			return 1;
2792 		}
2793 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2794 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2795 		tso_info->tcp_ipv6_csum_en = 1;
2796 	} else {
2797 		qdf_err("TSO: ethertype 0x%x is not supported!",
2798 			tso_info->ethproto);
2799 		return 1;
2800 	}
2801 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2802 	tso_info->tcphdr = tcp_hdr(skb);
2803 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2804 	/* get pointer to the ethernet + IP + TCP header and their length */
2805 	tso_info->eit_hdr = skb->data;
2806 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2807 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2808 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2809 							tso_info->eit_hdr,
2810 							tso_info->eit_hdr_len,
2811 							DMA_TO_DEVICE);
2812 	if (unlikely(dma_mapping_error(osdev->dev,
2813 				       tso_info->eit_hdr_dma_map_addr))) {
2814 		qdf_err("DMA mapping error!");
2815 		qdf_assert(0);
2816 		return 1;
2817 	}
2818 
2819 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2820 		/* include IPv4 header length for IPV4 (total length) */
2821 		tso_info->ip_tcp_hdr_len =
2822 			tso_info->eit_hdr_len - tso_info->l2_len;
2823 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2824 		/* exclude IPv6 header length for IPv6 (payload length) */
2825 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2826 	}
2827 	/*
2828 	 * The length of the payload (application layer data) is added to
2829 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2830 	 * descriptor.
2831 	 */
2832 
2833 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2834 		tso_info->tcp_seq_num,
2835 		tso_info->eit_hdr_len,
2836 		tso_info->l2_len,
2837 		skb->len);
2838 	return 0;
2839 }
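/*
 * Worked example (illustrative): for an untagged IPV4 TCP frame with no
 * IP or TCP options,
 *
 *	l2_len         = 14			(ethernet header)
 *	eit_hdr_len    = 14 + 20 + 20 = 54	(eth + IP + TCP)
 *	ip_tcp_hdr_len = 54 - 14 = 40		(IPv4 total-length basis)
 *
 * whereas for IPv6 only tcp_hdrlen() is used, since the IPv6 payload
 * length field excludes the IPv6 header itself.
 */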
2840 
2841 
2842 /**
2843  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2844  *
2845  * @curr_seg: Segment whose contents are initialized
2846  * @tso_cmn_info: Parameters common to all segments
2847  *
2848  * Return: None
2849  */
2850 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2851 				struct qdf_tso_seg_elem_t *curr_seg,
2852 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2853 {
2854 	/* Initialize the flags to 0 */
2855 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2856 
2857 	/*
2858 	 * The following fields remain the same across all segments of
2859 	 * a jumbo packet
2860 	 */
2861 	curr_seg->seg.tso_flags.tso_enable = 1;
2862 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2863 		tso_cmn_info->ipv4_csum_en;
2864 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2865 		tso_cmn_info->tcp_ipv6_csum_en;
2866 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2867 		tso_cmn_info->tcp_ipv4_csum_en;
2868 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2869 
2870 	/* The following fields change for the segments */
2871 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2872 	tso_cmn_info->ip_id++;
2873 
2874 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2875 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2876 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2877 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2878 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2879 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2880 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2881 
2882 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2883 
2884 	/*
2885 	 * First fragment for each segment always contains the ethernet,
2886 	 * IP and TCP header
2887 	 */
2888 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2889 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2890 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2891 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2892 
2893 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2894 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2895 		   tso_cmn_info->eit_hdr_len,
2896 		   curr_seg->seg.tso_flags.tcp_seq_num,
2897 		   curr_seg->seg.total_len);
2898 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2899 }
2900 
2901 /**
2902  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2903  * into segments
2904  * @osdev: qdf device handle
2905  * @skb: network buffer to be segmented
2905  * @tso_info: This is the output. The information about the
2906  *           TSO segments will be populated within this.
2907  *
2908  * This function fragments a TCP jumbo packet into smaller
2909  * segments to be transmitted by the driver. It chains the TSO
2910  * segments created into a list.
2911  *
2912  * Return: number of TSO segments
2913  */
2914 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2915 		struct qdf_tso_info_t *tso_info)
2916 {
2917 	/* common across all segments */
2918 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2919 	/* segment specific */
2920 	void *tso_frag_vaddr;
2921 	qdf_dma_addr_t tso_frag_paddr = 0;
2922 	uint32_t num_seg = 0;
2923 	struct qdf_tso_seg_elem_t *curr_seg;
2924 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2925 	struct skb_frag_struct *frag = NULL;
2926 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2927 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2928 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2929 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2930 	int j = 0; /* skb fragment index */
2931 
2932 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2933 	total_num_seg = tso_info->tso_num_seg_list;
2934 	curr_seg = tso_info->tso_seg_list;
2935 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2936 
2937 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2938 						skb, &tso_cmn_info))) {
2939 		qdf_warn("TSO: error getting common segment info");
2940 		return 0;
2941 	}
2942 
2943 	/* length of the first chunk of data in the skb */
2944 	skb_frag_len = skb_headlen(skb);
2945 
2946 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2947 	/* update the remaining skb fragment length and TSO segment length */
2948 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2949 	skb_proc -= tso_cmn_info.eit_hdr_len;
2950 
2951 	/* get the address to the next tso fragment */
2952 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2953 	/* get the length of the next tso fragment */
2954 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2955 
2956 	if (tso_frag_len != 0) {
2957 		tso_frag_paddr = dma_map_single(osdev->dev,
2958 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2959 	}
2960 
2961 	if (unlikely(dma_mapping_error(osdev->dev,
2962 					tso_frag_paddr))) {
2963 		qdf_err("DMA mapping error!");
2964 		qdf_assert(0);
2965 		return 0;
2966 	}
2967 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2968 		__LINE__, skb_frag_len, tso_frag_len);
2969 	num_seg = tso_info->num_segs;
2970 	tso_info->num_segs = 0;
2971 	tso_info->is_tso = 1;
2972 
2973 	while (num_seg && curr_seg) {
2974 		int i = 1; /* tso fragment index */
2975 		uint8_t more_tso_frags = 1;
2976 
2977 		curr_seg->seg.num_frags = 0;
2978 		tso_info->num_segs++;
2979 		total_num_seg->num_seg.tso_cmn_num_seg++;
2980 
2981 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2982 						 &tso_cmn_info);
2983 
2984 		if (unlikely(skb_proc == 0))
2985 			return tso_info->num_segs;
2986 
2987 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2988 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2989 		/* frag len is added to ip_len in while loop below*/
2990 
2991 		curr_seg->seg.num_frags++;
2992 
2993 		while (more_tso_frags) {
2994 			if (tso_frag_len != 0) {
2995 				curr_seg->seg.tso_frags[i].vaddr =
2996 					tso_frag_vaddr;
2997 				curr_seg->seg.tso_frags[i].length =
2998 					tso_frag_len;
2999 				curr_seg->seg.total_len += tso_frag_len;
3000 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3001 				curr_seg->seg.num_frags++;
3002 				skb_proc = skb_proc - tso_frag_len;
3003 
3004 				/* increment the TCP sequence number */
3005 
3006 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3007 				curr_seg->seg.tso_frags[i].paddr =
3008 					tso_frag_paddr;
3009 			}
3010 
3011 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3012 					__func__, __LINE__,
3013 					i,
3014 					tso_frag_len,
3015 					curr_seg->seg.total_len,
3016 					curr_seg->seg.tso_frags[i].vaddr);
3017 
3018 			/* if there is no more data left in the skb */
3019 			if (!skb_proc)
3020 				return tso_info->num_segs;
3021 
3022 			/* get the next payload fragment information */
3023 			/* check if there are more fragments in this segment */
3024 			if (tso_frag_len < tso_seg_size) {
3025 				more_tso_frags = 1;
3026 				if (tso_frag_len != 0) {
3027 					tso_seg_size = tso_seg_size -
3028 						tso_frag_len;
3029 					i++;
3030 					if (curr_seg->seg.num_frags ==
3031 								FRAG_NUM_MAX) {
3032 						more_tso_frags = 0;
3033 						/*
3034 						 * reset i and the tso
3035 						 * payload size
3036 						 */
3037 						i = 1;
3038 						tso_seg_size =
3039 							skb_shinfo(skb)->
3040 								gso_size;
3041 					}
3042 				}
3043 			} else {
3044 				more_tso_frags = 0;
3045 				/* reset i and the tso payload size */
3046 				i = 1;
3047 				tso_seg_size = skb_shinfo(skb)->gso_size;
3048 			}
3049 
3050 			/* if the next fragment is contiguous */
3051 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3052 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3053 				skb_frag_len = skb_frag_len - tso_frag_len;
3054 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3055 
3056 			} else { /* the next fragment is not contiguous */
3057 				if (skb_shinfo(skb)->nr_frags == 0) {
3058 					qdf_info("TSO: nr_frags == 0!");
3059 					qdf_assert(0);
3060 					return 0;
3061 				}
3062 				if (j >= skb_shinfo(skb)->nr_frags) {
3063 					qdf_info("TSO: nr_frags %d j %d",
3064 						 skb_shinfo(skb)->nr_frags, j);
3065 					qdf_assert(0);
3066 					return 0;
3067 				}
3068 				frag = &skb_shinfo(skb)->frags[j];
3069 				skb_frag_len = skb_frag_size(frag);
3070 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3071 				tso_frag_vaddr = skb_frag_address_safe(frag);
3072 				j++;
3073 			}
3074 
3075 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3076 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3077 				tso_seg_size);
3078 
3079 			if (!(tso_frag_vaddr)) {
3080 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3081 						__func__);
3082 				return 0;
3083 			}
3084 
3085 			tso_frag_paddr =
3086 					 dma_map_single(osdev->dev,
3087 						 tso_frag_vaddr,
3088 						 tso_frag_len,
3089 						 DMA_TO_DEVICE);
3090 			if (unlikely(dma_mapping_error(osdev->dev,
3091 							tso_frag_paddr))) {
3092 				qdf_err("DMA mapping error!");
3093 				qdf_assert(0);
3094 				return 0;
3095 			}
3096 		}
3097 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3098 				curr_seg->seg.tso_flags.tcp_seq_num);
3099 		num_seg--;
3100 		/* if TCP FIN flag was set, set it in the last segment */
3101 		if (!num_seg)
3102 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3103 
3104 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3105 		curr_seg = curr_seg->next;
3106 	}
3107 	return tso_info->num_segs;
3108 }
3109 qdf_export_symbol(__qdf_nbuf_get_tso_info);
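/*
 * Worked example (illustrative): a linear skb with a 54-byte EIT header,
 * 4380 bytes of TCP payload and gso_size 1460 is split into three
 * segments. Every segment's frag[0] is the shared 54-byte EIT header,
 * frag[1] covers the next 1460 payload bytes, and tcp_seq_num advances
 * by 1460 from one segment to the next.
 */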
3110 
3111 /**
3112  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3113  *
3114  * @osdev: qdf device handle
3115  * @tso_seg: TSO segment element to be unmapped
3116  * @is_last_seg: whether this is last tso seg or not
3117  *
3118  * Return: none
3119  */
3120 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3121 			  struct qdf_tso_seg_elem_t *tso_seg,
3122 			  bool is_last_seg)
3123 {
3124 	uint32_t num_frags = 0;
3125 
3126 	if (tso_seg->seg.num_frags > 0)
3127 		num_frags = tso_seg->seg.num_frags - 1;
3128 
3129 	/* Num of frags in a tso seg cannot be less than 2 */
3130 	if (num_frags < 1) {
3131 		/*
3132 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3133 		 * this may happen when qdf_nbuf_get_tso_info failed,
3134 		 * do dma unmap for the 0th frag in this seg.
3135 		 */
3136 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3137 			goto last_seg_free_first_frag;
3138 
3139 		qdf_assert(0);
3140 		qdf_err("ERROR: num of frags in a tso segment is %d",
3141 			(num_frags + 1));
3142 		return;
3143 	}
3144 
3145 	while (num_frags) {
3146 		/* Do dma unmap of the tso seg except the 0th frag */
3147 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3148 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3149 				num_frags);
3150 			qdf_assert(0);
3151 			return;
3152 		}
3153 		dma_unmap_single(osdev->dev,
3154 				 tso_seg->seg.tso_frags[num_frags].paddr,
3155 				 tso_seg->seg.tso_frags[num_frags].length,
3156 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3157 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3158 		num_frags--;
3159 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3160 	}
3161 
3162 last_seg_free_first_frag:
3163 	if (is_last_seg) {
3164 		/* Do dma unmap for the tso seg 0th frag */
3165 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3166 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3167 			qdf_assert(0);
3168 			return;
3169 		}
3170 		dma_unmap_single(osdev->dev,
3171 				 tso_seg->seg.tso_frags[0].paddr,
3172 				 tso_seg->seg.tso_frags[0].length,
3173 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3174 		tso_seg->seg.tso_frags[0].paddr = 0;
3175 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3176 	}
3177 }
3178 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3179 
3180 /**
3181  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
3182  * needed for an nbuf
3183  * @skb: network buffer to be segmented
3184  *
3185  * This function calculates how many TSO segments a TCP jumbo packet
3186  * will be split into, without performing the segmentation itself.
3187  *
3188  * Return: number of TSO segments, 0 on failure
3192  */
3193 #ifndef BUILD_X86
3194 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3195 {
3196 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3197 	uint32_t remainder, num_segs = 0;
3198 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3199 	uint8_t frags_per_tso = 0;
3200 	uint32_t skb_frag_len = 0;
3201 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3202 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3203 	struct skb_frag_struct *frag = NULL;
3204 	int j = 0;
3205 	uint32_t temp_num_seg = 0;
3206 
3207 	/* length of the first chunk of data in the skb minus eit header*/
3208 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3209 
3210 	/* Calculate num of segs for skb's first chunk of data*/
3211 	remainder = skb_frag_len % tso_seg_size;
3212 	num_segs = skb_frag_len / tso_seg_size;
3213 	/*
3214 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3215 	 * In that case, one more tso seg is required to accommodate
3216 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3217 	 * then remaining data will be accommodated while doing the calculation
3218 	 * for nr_frags data. Hence, frags_per_tso++.
3219 	 */
3220 	if (remainder) {
3221 		if (!skb_nr_frags)
3222 			num_segs++;
3223 		else
3224 			frags_per_tso++;
3225 	}
3226 
3227 	while (skb_nr_frags) {
3228 		if (j >= skb_shinfo(skb)->nr_frags) {
3229 			qdf_info("TSO: nr_frags %d j %d",
3230 				 skb_shinfo(skb)->nr_frags, j);
3231 			qdf_assert(0);
3232 			return 0;
3233 		}
3234 		/*
3235 		 * Calculate the number of tso segs for nr_frags data:
3236 		 * get the length of each frag in skb_frag_len, add it to the
3237 		 * remainder, get the number of segments by dividing by
3238 		 * tso_seg_size, and calculate the new remainder.
3239 		 * Decrement the nr_frags value and keep
3240 		 * looping over all the skb fragments.
3241 		 */
3242 		frag = &skb_shinfo(skb)->frags[j];
3243 		skb_frag_len = skb_frag_size(frag);
3244 		temp_num_seg = num_segs;
3245 		remainder += skb_frag_len;
3246 		num_segs += remainder / tso_seg_size;
3247 		remainder = remainder % tso_seg_size;
3248 		skb_nr_frags--;
3249 		if (remainder) {
3250 			if (num_segs > temp_num_seg)
3251 				frags_per_tso = 0;
3252 			/*
3253 			 * Increment frags_per_tso whenever the remainder is
3254 			 * positive. If frags_per_tso reaches (max - 1)
3255 			 * [the first frag always holds the EIT header, hence
3256 			 * max - 1], increment num_segs as no more data can be
3257 			 * accommodated in the current tso seg. Reset the
3258 			 * remainder and frags_per_tso and keep looping.
3259 			 */
3260 			frags_per_tso++;
3261 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3262 				num_segs++;
3263 				frags_per_tso = 0;
3264 				remainder = 0;
3265 			}
3266 			/*
3267 			 * If this is the last skb frag and the remainder is
3268 			 * still non-zero (frags_per_tso has not reached max-1),
3269 			 * then increment the num_segs to take care of the
3270 			 * remaining length.
3271 			 */
3272 			if (!skb_nr_frags && remainder) {
3273 				num_segs++;
3274 				frags_per_tso = 0;
3275 			}
3276 		} else {
3277 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3278 			frags_per_tso = 0;
3279 		}
3280 		j++;
3281 	}
3282 
3283 	return num_segs;
3284 }
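/*
 * Worked example (illustrative): gso_size 1460 and a linear skb carrying
 * a 54-byte EIT header plus 3000 payload bytes with no page frags gives
 * 3000 / 1460 = 2 full segments and remainder 80; since nr_frags == 0
 * the remainder needs one more segment, so the function returns 3.
 */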
3285 #else
3286 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3287 {
3288 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3289 	struct skb_frag_struct *frag = NULL;
3290 
3291 	/*
3292 	 * Check if the head SKB or any of the frags are allocated in the
3293 	 * < 0x50000000 region, which cannot be accessed by the target
3294 	 */
3295 	if (virt_to_phys(skb->data) < 0x50000040) {
3296 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3297 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3298 				virt_to_phys(skb->data));
3299 		goto fail;
3300 
3301 	}
3302 
3303 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3304 		frag = &skb_shinfo(skb)->frags[i];
3305 
3306 		if (!frag)
3307 			goto fail;
3308 
3309 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3310 			goto fail;
3311 	}
3312 
3313 
3314 	gso_size = skb_shinfo(skb)->gso_size;
3315 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3316 			+ tcp_hdrlen(skb));
3317 	while (tmp_len) {
3318 		num_segs++;
3319 		if (tmp_len > gso_size)
3320 			tmp_len -= gso_size;
3321 		else
3322 			break;
3323 	}
3324 
3325 	return num_segs;
3326 
3327 	/*
3328 	 * Do not free this frame, just do socket level accounting
3329 	 * so that this is not reused.
3330 	 */
3331 fail:
3332 	if (skb->sk)
3333 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3334 
3335 	return 0;
3336 }
3337 #endif
3338 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3339 
3340 #endif /* FEATURE_TSO */
3341 
3342 /**
3343  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
3344  * @dmaaddr: DMA address to split
3345  * @lo: pointer filled with the low 32 bits
3346  * @hi: pointer filled with the high 32 bits
3347  *
3348  * Returns the high and low 32 bits of the DMA addr in the provided pointers
3349  *
3350  * Return: N/A
3348  */
3349 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3350 			  uint32_t *lo, uint32_t *hi)
3351 {
3352 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3353 		*lo = lower_32_bits(dmaaddr);
3354 		*hi = upper_32_bits(dmaaddr);
3355 	} else {
3356 		*lo = dmaaddr;
3357 		*hi = 0;
3358 	}
3359 }
3360 
3361 qdf_export_symbol(__qdf_dmaaddr_to_32s);
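/*
 * Example (illustrative): with a 64-bit qdf_dma_addr_t, the address
 * 0x0000000123456780 splits into *hi == 0x1 and *lo == 0x23456780; with
 * a 32-bit qdf_dma_addr_t the same call yields *hi == 0 and the address
 * unchanged in *lo.
 */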
3362 
3363 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3364 {
3365 	qdf_nbuf_users_inc(&skb->users);
3366 	return skb;
3367 }
3368 qdf_export_symbol(__qdf_nbuf_inc_users);
3369 
3370 int __qdf_nbuf_get_users(struct sk_buff *skb)
3371 {
3372 	return qdf_nbuf_users_read(&skb->users);
3373 }
3374 qdf_export_symbol(__qdf_nbuf_get_users);
3375 
3376 /**
3377  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3378  * @skb: sk_buff handle
3379  *
3380  * Return: none
3381  */
3382 
3383 void __qdf_nbuf_ref(struct sk_buff *skb)
3384 {
3385 	skb_get(skb);
3386 }
3387 qdf_export_symbol(__qdf_nbuf_ref);
3388 
3389 /**
3390  * __qdf_nbuf_shared() - Check whether the buffer is shared
3391  * @skb: sk_buff buffer
3392  *
3393  * Return: true if more than one entity holds a reference to this buffer
3394  */
3395 int __qdf_nbuf_shared(struct sk_buff *skb)
3396 {
3397 	return skb_shared(skb);
3398 }
3399 qdf_export_symbol(__qdf_nbuf_shared);
3400 
3401 /**
3402  * __qdf_nbuf_dmamap_create() - create a DMA map.
3403  * @osdev: qdf device handle
3404  * @dmap: dma map handle
3405  *
3406  * This can later be used to map networking buffers. They:
3407  * - need space in adf_drv's software descriptor
3408  * - are typically created during adf_drv_create
3409  * - need to be created before any API(qdf_nbuf_map) that uses them
3410  *
3411  * Return: QDF STATUS
3412  */
3413 QDF_STATUS
3414 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3415 {
3416 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3417 	/*
3418 	 * The driver can indicate its scatter/gather capability; it must be
3419 	 * handled here, using bounce buffers if they are present.
3420 	 */
3421 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3422 	if (!(*dmap))
3423 		error = QDF_STATUS_E_NOMEM;
3424 
3425 	return error;
3426 }
3427 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3428 /**
3429  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3430  * @osdev: qdf device handle
3431  * @dmap: dma map handle
3432  *
3433  * Return: none
3434  */
3435 void
3436 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3437 {
3438 	kfree(dmap);
3439 }
3440 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3441 
3442 /**
3443  * __qdf_nbuf_map_nbytes_single() - map nbytes
3444  * @osdev: os device
3445  * @buf: buffer
3446  * @dir: direction
3447  * @nbytes: number of bytes
3448  *
3449  * Return: QDF_STATUS
3450  */
3451 #ifdef A_SIMOS_DEVHOST
3452 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3453 		qdf_device_t osdev, struct sk_buff *buf,
3454 		 qdf_dma_dir_t dir, int nbytes)
3455 {
3456 	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
3459 	return QDF_STATUS_SUCCESS;
3460 }
3461 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3462 #else
3463 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3464 		qdf_device_t osdev, struct sk_buff *buf,
3465 		 qdf_dma_dir_t dir, int nbytes)
3466 {
3467 	qdf_dma_addr_t paddr;
3468 
3469 	/* assume that the OS only provides a single fragment */
3470 	QDF_NBUF_CB_PADDR(buf) = paddr =
3471 		dma_map_single(osdev->dev, buf->data,
3472 			nbytes, __qdf_dma_dir_to_os(dir));
3473 	return dma_mapping_error(osdev->dev, paddr) ?
3474 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3475 }
3476 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3477 #endif
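/*
 * Usage sketch (illustrative): a TX path maps only the bytes it hands to
 * hardware and must later unmap with the same length and direction:
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	(hand QDF_NBUF_CB_PADDR(skb) to the DMA engine, then:)
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */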
3478 /**
3479  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3480  * @osdev: os device
3481  * @buf: buffer
3482  * @dir: direction
3483  * @nbytes: number of bytes
3484  *
3485  * Return: none
3486  */
3487 #if defined(A_SIMOS_DEVHOST)
3488 void
3489 __qdf_nbuf_unmap_nbytes_single(
3490 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3491 {
3492 }
3493 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3494 
3495 #else
3496 void
3497 __qdf_nbuf_unmap_nbytes_single(
3498 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3499 {
3500 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3501 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3502 		return;
3503 	}
3504 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3505 			nbytes, __qdf_dma_dir_to_os(dir));
3506 }
3507 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3508 #endif
3509 /**
3510  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3511  * @osdev: os device
3512  * @skb: skb handle
3513  * @dir: dma direction
3514  * @nbytes: number of bytes to be mapped
3515  *
3516  * Return: QDF_STATUS
3517  */
3518 #ifdef QDF_OS_DEBUG
3519 QDF_STATUS
3520 __qdf_nbuf_map_nbytes(
3521 	qdf_device_t osdev,
3522 	struct sk_buff *skb,
3523 	qdf_dma_dir_t dir,
3524 	int nbytes)
3525 {
3526 	struct skb_shared_info  *sh = skb_shinfo(skb);
3527 
3528 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3529 
3530 	/*
3531 	 * Assume there's only a single fragment.
3532 	 * To support multiple fragments, it would be necessary to change
3533 	 * adf_nbuf_t to be a separate object that stores meta-info
3534 	 * (including the bus address for each fragment) and a pointer
3535 	 * to the underlying sk_buff.
3536 	 */
3537 	qdf_assert(sh->nr_frags == 0);
3538 
3539 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3540 }
3541 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3542 #else
3543 QDF_STATUS
3544 __qdf_nbuf_map_nbytes(
3545 	qdf_device_t osdev,
3546 	struct sk_buff *skb,
3547 	qdf_dma_dir_t dir,
3548 	int nbytes)
3549 {
3550 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3551 }
3552 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3553 #endif
3554 /**
3555  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3556  * @osdev: OS device
3557  * @skb: skb handle
3558  * @dir: direction
3559  * @nbytes: number of bytes
3560  *
3561  * Return: none
3562  */
3563 void
3564 __qdf_nbuf_unmap_nbytes(
3565 	qdf_device_t osdev,
3566 	struct sk_buff *skb,
3567 	qdf_dma_dir_t dir,
3568 	int nbytes)
3569 {
3570 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3571 
3572 	/*
3573 	 * Assume there's a single fragment.
3574 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3575 	 */
3576 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3577 }
3578 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3579 
3580 /**
3581  * __qdf_nbuf_dma_map_info() - return the dma map info
3582  * @bmap: dma map
3583  * @sg: dma map info
3584  *
3585  * Return: none
3586  */
3587 void
3588 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3589 {
3590 	qdf_assert(bmap->mapped);
3591 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3592 
3593 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3594 			sizeof(struct __qdf_segment));
3595 	sg->nsegs = bmap->nsegs;
3596 }
3597 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3598 /**
3599  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3600  *			specified by the index
3601  * @skb: sk buff
3602  * @sg: scatter/gather list of all the frags
3603  *
3604  * Return: none
3605  */
3606 #if defined(__QDF_SUPPORT_FRAG_MEM)
3607 void
3608 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3609 {
3610 	struct skb_shared_info *sh = skb_shinfo(skb);
3611 	int i;
3612 
3613 	qdf_assert(skb != NULL);
3614 	sg->sg_segs[0].vaddr = skb->data;
3615 	sg->sg_segs[0].len   = skb->len;
3616 	sg->nsegs            = 1;
3617 
3618 	for (i = 1; i <= sh->nr_frags; i++) {
3619 		skb_frag_t *f = &sh->frags[i - 1];
3620 
3621 		qdf_assert(i < QDF_MAX_SGLIST);
3622 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
3623 						   f->page_offset);
3624 		sg->sg_segs[i].len   = f->size;
3625 	}
3626 	sg->nsegs += sh->nr_frags;
3625 
3626 }
3627 qdf_export_symbol(__qdf_nbuf_frag_info);
3628 #else
3629 #ifdef QDF_OS_DEBUG
3630 void
3631 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3632 {
3633 
3634 	struct skb_shared_info  *sh = skb_shinfo(skb);
3635 
3636 	qdf_assert(skb != NULL);
3637 	sg->sg_segs[0].vaddr = skb->data;
3638 	sg->sg_segs[0].len   = skb->len;
3639 	sg->nsegs            = 1;
3640 
3641 	qdf_assert(sh->nr_frags == 0);
3642 }
3643 qdf_export_symbol(__qdf_nbuf_frag_info);
3644 #else
3645 void
3646 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3647 {
3648 	sg->sg_segs[0].vaddr = skb->data;
3649 	sg->sg_segs[0].len   = skb->len;
3650 	sg->nsegs            = 1;
3651 }
3652 qdf_export_symbol(__qdf_nbuf_frag_info);
3653 #endif
3654 #endif
3655 /**
3656  * __qdf_nbuf_get_frag_size() - get frag size
3657  * @nbuf: sk buffer
3658  * @cur_frag: current frag
3659  *
3660  * Return: frag size
3661  */
3662 uint32_t
3663 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3664 {
3665 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3666 	const skb_frag_t *frag = sh->frags + cur_frag;
3667 
3668 	return skb_frag_size(frag);
3669 }
3670 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3671 
3672 /**
3673  * __qdf_nbuf_frag_map() - dma map frag
3674  * @osdev: os device
3675  * @nbuf: sk buff
3676  * @offset: offset
3677  * @dir: direction
3678  * @cur_frag: current fragment
3679  *
3680  * Return: QDF status
3681  */
3682 #ifdef A_SIMOS_DEVHOST
3683 QDF_STATUS __qdf_nbuf_frag_map(
3684 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3685 	int offset, qdf_dma_dir_t dir, int cur_frag)
3686 {
3687 	QDF_NBUF_CB_PADDR(nbuf) = (qdf_dma_addr_t)nbuf->data;
3690 	return QDF_STATUS_SUCCESS;
3691 }
3692 qdf_export_symbol(__qdf_nbuf_frag_map);
3693 #else
3694 QDF_STATUS __qdf_nbuf_frag_map(
3695 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3696 	int offset, qdf_dma_dir_t dir, int cur_frag)
3697 {
3698 	dma_addr_t paddr, frag_len;
3699 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3700 	const skb_frag_t *frag = sh->frags + cur_frag;
3701 
3702 	frag_len = skb_frag_size(frag);
3703 
3704 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3705 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3706 					__qdf_dma_dir_to_os(dir));
3707 	return dma_mapping_error(osdev->dev, paddr) ?
3708 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3709 }
3710 qdf_export_symbol(__qdf_nbuf_frag_map);
3711 #endif
3712 /**
3713  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3714  * @dmap: dma map
3715  * @cb: callback
3716  * @arg: argument
3717  *
3718  * Return: none
3719  */
3720 void
3721 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3722 {
3723 	return;
3724 }
3725 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3726 
3727 
3728 /**
3729  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3730  * @osdev: os device
3731  * @buf: sk buff
3732  * @dir: direction
3733  *
3734  * Return: none
3735  */
3736 #if defined(A_SIMOS_DEVHOST)
3737 static void __qdf_nbuf_sync_single_for_cpu(
3738 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3739 {
3740 	return;
3741 }
3742 #else
3743 static void __qdf_nbuf_sync_single_for_cpu(
3744 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3745 {
3746 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3747 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3748 		return;
3749 	}
3750 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3751 		skb_end_offset(buf) - skb_headroom(buf),
3752 		__qdf_dma_dir_to_os(dir));
3753 }
3754 #endif
3755 /**
3756  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3757  * @osdev: os device
3758  * @skb: sk buff
3759  * @dir: direction
3760  *
3761  * Return: none
3762  */
3763 void
3764 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3765 	struct sk_buff *skb, qdf_dma_dir_t dir)
3766 {
3767 	qdf_assert(
3768 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3769 
3770 	/*
3771 	 * Assume there's a single fragment.
3772 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3773 	 */
3774 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3775 }
3776 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3777 
3778 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3779 /**
3780  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3781  * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buffer into which the VHT info is written
3783  * @rtap_len: Current length of radiotap buffer
3784  *
3785  * Return: Length of radiotap after VHT flags updated.
3786  */
3787 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3788 					struct mon_rx_status *rx_status,
3789 					int8_t *rtap_buf,
3790 					uint32_t rtap_len)
3791 {
3792 	uint16_t vht_flags = 0;
3793 
3794 	rtap_len = qdf_align(rtap_len, 2);
3795 
3796 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3797 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3798 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3799 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3800 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3801 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3802 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3803 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3804 	rtap_len += 2;
3805 
3806 	rtap_buf[rtap_len] |=
3807 		(rx_status->is_stbc ?
3808 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3809 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3810 		(rx_status->ldpc ?
3811 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3812 		(rx_status->beamformed ?
3813 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3814 	rtap_len += 1;
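	/*
	 * Map the internal 2-bit bandwidth encoding (0..3) onto the
	 * radiotap VHT bandwidth table indices: 0 = 20 MHz, 1 = 40 MHz,
	 * 4 = 80 MHz and 11 = 160 MHz.
	 */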
3815 	switch (rx_status->vht_flag_values2) {
3816 	case IEEE80211_RADIOTAP_VHT_BW_20:
3817 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3818 		break;
3819 	case IEEE80211_RADIOTAP_VHT_BW_40:
3820 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3821 		break;
3822 	case IEEE80211_RADIOTAP_VHT_BW_80:
3823 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3824 		break;
3825 	case IEEE80211_RADIOTAP_VHT_BW_160:
3826 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3827 		break;
3828 	}
3829 	rtap_len += 1;
3830 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3831 	rtap_len += 1;
3832 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3833 	rtap_len += 1;
3834 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3835 	rtap_len += 1;
3836 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3837 	rtap_len += 1;
3838 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3839 	rtap_len += 1;
3840 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3841 	rtap_len += 1;
3842 	put_unaligned_le16(rx_status->vht_flag_values6,
3843 			   &rtap_buf[rtap_len]);
3844 	rtap_len += 2;
3845 
3846 	return rtap_len;
3847 }
3848 
3849 /**
 * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header HE flags
3851  * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer into which the radiotap header is written
 * @rtap_len: current radiotap buffer length
 *
 * This API updates the high-efficiency (802.11ax) fields in the radiotap
 * header.
 *
 * Return: updated radiotap buffer length.
3858  */
3859 static unsigned int
3860 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3861 				     int8_t *rtap_buf, uint32_t rtap_len)
3862 {
3863 	/*
3864 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3865 	 * Enable all "known" HE radiotap flags for now
3866 	 */
3867 	rtap_len = qdf_align(rtap_len, 2);
3868 
3869 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3870 	rtap_len += 2;
3871 
3872 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3873 	rtap_len += 2;
3874 
3875 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3876 	rtap_len += 2;
3877 
3878 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3879 	rtap_len += 2;
3880 
3881 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3882 	rtap_len += 2;
3883 
3884 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3885 	rtap_len += 2;
3886 	qdf_debug("he data %x %x %x %x %x %x",
3887 		  rx_status->he_data1,
3888 		  rx_status->he_data2, rx_status->he_data3,
3889 		  rx_status->he_data4, rx_status->he_data5,
3890 		  rx_status->he_data6);
3891 	return rtap_len;
3892 }
3893 
3894 
3895 /**
3896  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3897  * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer into which the radiotap header is written
 * @rtap_len: current radiotap buffer length
 *
 * This API updates the HE-MU fields in the radiotap header.
 *
 * Return: updated radiotap buffer length.
3904  */
3905 static unsigned int
3906 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3907 				     int8_t *rtap_buf, uint32_t rtap_len)
3908 {
3909 	rtap_len = qdf_align(rtap_len, 2);
3910 
3911 	/*
3912 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3913 	 * Enable all "known" he-mu radiotap flags for now
3914 	 */
3915 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3916 	rtap_len += 2;
3917 
3918 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3919 	rtap_len += 2;
3920 
3921 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3922 	rtap_len += 1;
3923 
3924 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3925 	rtap_len += 1;
3926 
3927 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3928 	rtap_len += 1;
3929 
3930 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3931 	rtap_len += 1;
3932 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
3933 		  rx_status->he_flags1,
3934 		  rx_status->he_flags2, rx_status->he_RU[0],
3935 		  rx_status->he_RU[1], rx_status->he_RU[2],
3936 		  rx_status->he_RU[3]);
3937 
3938 	return rtap_len;
3939 }
3940 
3941 /**
3942  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3943  * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer into which the radiotap header is written
 * @rtap_len: current radiotap buffer length
 *
 * This API updates the HE-MU-other fields in the radiotap header.
 *
 * Return: updated radiotap buffer length.
3950  */
3951 static unsigned int
3952 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3953 				     int8_t *rtap_buf, uint32_t rtap_len)
3954 {
3955 	rtap_len = qdf_align(rtap_len, 2);
3956 
3957 	/*
3958 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3959 	 * Enable all "known" he-mu-other radiotap flags for now
3960 	 */
3961 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3962 	rtap_len += 2;
3963 
3964 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3965 	rtap_len += 2;
3966 
3967 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3968 	rtap_len += 1;
3969 
3970 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3971 	rtap_len += 1;
3972 	qdf_debug("he_per_user %x %x pos %x knwn %x",
3973 		  rx_status->he_per_user_1,
3974 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
3975 		  rx_status->he_per_user_known);
3976 	return rtap_len;
3977 }
3978 
3979 
/*
 * Maximum radiotap header length: the total length (RADIOTAP_HEADER_LEN,
 * which already includes the mandatory struct ieee80211_radiotap_header)
 * cannot exceed the available headroom_sz.
 * Increase these lengths when more radiotap elements are added.
 * The number after '+' indicates the maximum possible increase due to
 * alignment padding.
 */
3987 
3988 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
3989 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
3990 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
3991 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
3992 #define RADIOTAP_FIXED_HEADER_LEN 17
3993 #define RADIOTAP_HT_FLAGS_LEN 3
3994 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
3995 #define RADIOTAP_VENDOR_NS_LEN \
3996 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
3997 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3998 				RADIOTAP_FIXED_HEADER_LEN + \
3999 				RADIOTAP_HT_FLAGS_LEN + \
4000 				RADIOTAP_VHT_FLAGS_LEN + \
4001 				RADIOTAP_AMPDU_STATUS_LEN + \
4002 				RADIOTAP_HE_FLAGS_LEN + \
4003 				RADIOTAP_HE_MU_FLAGS_LEN + \
4004 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4005 				RADIOTAP_VENDOR_NS_LEN)
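/*
 * Worked example (hedged; the exact value depends on the kernel's struct
 * layouts): with an 8-byte struct ieee80211_radiotap_header and a 22-byte
 * struct qdf_radiotap_vendor_ns_ath, RADIOTAP_HEADER_LEN evaluates to
 * 8 + 17 + 3 + 13 + 11 + 13 + 9 + 19 + 23 = 116 bytes, which must fit in
 * the headroom passed to qdf_nbuf_update_radiotap().
 */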
4006 
4007 #define IEEE80211_RADIOTAP_HE 23
4008 #define IEEE80211_RADIOTAP_HE_MU	24
4009 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4010 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4011 
/**
 * radiotap_num_to_freq() - Get frequency from channel number
 * @chan_num: Input channel number
 *
 * Return: Channel frequency in MHz
 */
static uint16_t radiotap_num_to_freq(uint16_t chan_num)
4019 {
4020 	if (chan_num == CHANNEL_NUM_14)
4021 		return CHANNEL_FREQ_2484;
4022 	if (chan_num < CHANNEL_NUM_14)
4023 		return CHANNEL_FREQ_2407 +
4024 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4025 
4026 	if (chan_num < CHANNEL_NUM_27)
4027 		return CHANNEL_FREQ_2512 +
4028 			((chan_num - CHANNEL_NUM_15) *
4029 			 FREQ_MULTIPLIER_CONST_20MHZ);
4030 
4031 	if (chan_num > CHANNEL_NUM_182 &&
4032 			chan_num < CHANNEL_NUM_197)
4033 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4034 			CHANNEL_FREQ_4000);
4035 
4036 	return CHANNEL_FREQ_5000 +
4037 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4038 }
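/*
 * Examples of the mapping above: channel 6 -> 2407 + 6 * 5 = 2437 MHz,
 * channel 36 -> 5000 + 36 * 5 = 5180 MHz; channel 14 is special-cased to
 * 2484 MHz per the 802.11 channelization rules.
 */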
4039 
4040 /**
4041  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4042  * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buffer into which the AMPDU info is written
4044  * @rtap_len: Current length of radiotap buffer
4045  *
4046  * Return: Length of radiotap after AMPDU flags updated.
4047  */
4048 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4049 					struct mon_rx_status *rx_status,
4050 					uint8_t *rtap_buf,
4051 					uint32_t rtap_len)
4052 {
4053 	/*
4054 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4055 	 * First 32 bits of AMPDU represents the reference number
4056 	 */
4057 
4058 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4059 	uint16_t ampdu_flags = 0;
4060 	uint16_t ampdu_reserved_flags = 0;
4061 
4062 	rtap_len = qdf_align(rtap_len, 4);
4063 
4064 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4065 	rtap_len += 4;
4066 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4067 	rtap_len += 2;
4068 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4069 	rtap_len += 2;
4070 
4071 	return rtap_len;
4072 }
4073 
4074 /**
4075  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4076  * @rx_status: Pointer to rx_status.
4077  * @nbuf:      nbuf pointer to which radiotap has to be updated
4078  * @headroom_sz: Available headroom size.
4079  *
 * Return: length of the radiotap header added to the nbuf, 0 on failure.
4081  */
4082 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4083 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4084 {
4085 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4086 	struct ieee80211_radiotap_header *rthdr =
4087 		(struct ieee80211_radiotap_header *)rtap_buf;
4088 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4089 	uint32_t rtap_len = rtap_hdr_len;
4090 	uint8_t length = rtap_len;
4091 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4092 
	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds */
4094 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4095 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4096 	rtap_len += 8;
4097 
4098 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4099 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4100 
4101 	if (rx_status->rs_fcs_err)
4102 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4103 
4104 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4105 	rtap_len += 1;
4106 
4107 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4108 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4109 	    !rx_status->he_flags) {
4110 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4111 		rtap_buf[rtap_len] = rx_status->rate;
	} else {
		rtap_buf[rtap_len] = 0;
	}
4114 	rtap_len += 1;
4115 
4116 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4117 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4118 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4119 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4120 	rtap_len += 2;
4121 	/* Channel flags. */
4122 	if (rx_status->chan_num > CHANNEL_NUM_35)
4123 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4124 	else
4125 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4126 	if (rx_status->cck_flag)
4127 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4128 	if (rx_status->ofdm_flag)
4129 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4130 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4131 	rtap_len += 2;
4132 
4133 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4134 	 *					(dBm)
4135 	 */
4136 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	/*
	 * rssi_comb is in dB relative to the noise floor; add the channel
	 * noise floor (typically -96 dBm) to convert it to dBm.
	 */
4141 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4142 	rtap_len += 1;
4143 
4144 	/* RX signal noise floor */
4145 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4146 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4147 	rtap_len += 1;
4148 
4149 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4150 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4151 	rtap_buf[rtap_len] = rx_status->nr_ant;
4152 	rtap_len += 1;
4153 
4154 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4155 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4156 		return 0;
4157 	}
4158 
4159 	if (rx_status->ht_flags) {
4160 		length = rtap_len;
		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4162 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4163 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4164 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4165 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4166 		rtap_len += 1;
4167 
4168 		if (rx_status->sgi)
4169 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4170 		if (rx_status->bw)
4171 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4172 		else
4173 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4174 		rtap_len += 1;
4175 
4176 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4177 		rtap_len += 1;
4178 
4179 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4180 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4181 			return 0;
4182 		}
4183 	}
4184 
4185 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4186 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4187 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4188 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4189 								rtap_buf,
4190 								rtap_len);
4191 	}
4192 
4193 	if (rx_status->vht_flags) {
4194 		length = rtap_len;
4195 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4196 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4197 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4198 								rtap_buf,
4199 								rtap_len);
4200 
4201 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4202 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4203 			return 0;
4204 		}
4205 	}
4206 
4207 	if (rx_status->he_flags) {
4208 		length = rtap_len;
4209 		/* IEEE80211_RADIOTAP_HE */
4210 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4211 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4212 								rtap_buf,
4213 								rtap_len);
4214 
4215 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4216 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4217 			return 0;
4218 		}
4219 	}
4220 
4221 	if (rx_status->he_mu_flags) {
4222 		length = rtap_len;
4223 		/* IEEE80211_RADIOTAP_HE-MU */
4224 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4225 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4226 								rtap_buf,
4227 								rtap_len);
4228 
4229 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4230 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4231 			return 0;
4232 		}
4233 	}
4234 
4235 	if (rx_status->he_mu_other_flags) {
4236 		length = rtap_len;
4237 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4238 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4239 		rtap_len =
4240 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4241 								rtap_buf,
4242 								rtap_len);
4243 
4244 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4245 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4246 			return 0;
4247 		}
4248 	}
4249 
4250 	rtap_len = qdf_align(rtap_len, 2);
4251 	/*
4252 	 * Radiotap Vendor Namespace
4253 	 */
4254 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4255 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4256 					(rtap_buf + rtap_len);
4257 	/*
4258 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4259 	 */
4260 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4261 	/*
4262 	 * Name space selector = 0
4263 	 * We only will have one namespace for now
4264 	 */
4265 	radiotap_vendor_ns_ath->hdr.selector = 0;
4266 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4267 					sizeof(*radiotap_vendor_ns_ath) -
4268 					sizeof(radiotap_vendor_ns_ath->hdr));
4269 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4270 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4271 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4272 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4273 				cpu_to_le32(rx_status->ppdu_timestamp);
4274 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4275 
4276 	rthdr->it_len = cpu_to_le16(rtap_len);
4277 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4278 
4279 	if (headroom_sz < rtap_len) {
4280 		qdf_err("ERROR: not enough space to update radiotap");
4281 		return 0;
4282 	}
4283 	qdf_nbuf_push_head(nbuf, rtap_len);
4284 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4285 	return rtap_len;
4286 }
4287 #else
4288 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4289 					struct mon_rx_status *rx_status,
4290 					int8_t *rtap_buf,
4291 					uint32_t rtap_len)
4292 {
4293 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4294 	return 0;
4295 }
4296 
static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				  int8_t *rtap_buf, uint32_t rtap_len)
4299 {
4300 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4301 	return 0;
4302 }
4303 
4304 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4305 					struct mon_rx_status *rx_status,
4306 					uint8_t *rtap_buf,
4307 					uint32_t rtap_len)
4308 {
4309 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4310 	return 0;
4311 }
4312 
4313 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4314 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4315 {
4316 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4317 	return 0;
4318 }
4319 #endif
4320 qdf_export_symbol(qdf_nbuf_update_radiotap);
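/*
 * Usage sketch (illustrative only): a monitor-mode rx path would typically
 * fill a struct mon_rx_status from the hardware descriptor and then prepend
 * the radiotap header. "rx_status" and "msdu" are hypothetical names.
 *
 *	if (!qdf_nbuf_update_radiotap(&rx_status, msdu,
 *				      qdf_nbuf_headroom(msdu)))
 *		qdf_err("radiotap update failed");
 */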
4321 
4322 /**
4323  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4324  * @cb_func_ptr: function pointer to the nbuf free callback
4325  *
4326  * This function registers a callback function for nbuf free.
4327  *
4328  * Return: none
4329  */
4330 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4331 {
4332 	nbuf_free_cb = cb_func_ptr;
4333 }
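/*
 * Usage sketch (illustrative only): a driver can hook nbuf free events,
 * e.g. to resume a flow-controlled queue. "my_nbuf_free_notify" is a
 * hypothetical callback matching the qdf_nbuf_free_t signature.
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_notify);
 */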
4334 
4335 /**
4336  * qdf_nbuf_classify_pkt() - classify packet
 * @skb: sk buff
4338  *
4339  * Return: none
4340  */
4341 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4342 {
4343 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4344 
	/* check whether the destination MAC address is broadcast/multicast */
	if (is_broadcast_ether_addr(eh->h_dest))
		QDF_NBUF_CB_SET_BCAST(skb);
	else if (is_multicast_ether_addr(eh->h_dest))
		QDF_NBUF_CB_SET_MCAST(skb);
4350 
4351 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4352 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4353 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4354 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4355 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4356 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4357 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4358 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4359 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4360 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4361 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4362 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4363 }
4364 qdf_export_symbol(qdf_nbuf_classify_pkt);
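/*
 * Typical usage (sketch): call qdf_nbuf_classify_pkt() on the tx path as
 * soon as the skb enters the driver, so that later stages can prioritize
 * control frames (ARP, DHCP, EAPOL, WAPI) by reading
 * QDF_NBUF_CB_GET_PACKET_TYPE(skb).
 */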
4365 
4366 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4367 {
4368 	qdf_nbuf_users_set(&nbuf->users, 1);
4369 	nbuf->data = nbuf->head + NET_SKB_PAD;
4370 	skb_reset_tail_pointer(nbuf);
4371 }
4372 qdf_export_symbol(__qdf_nbuf_init);
4373 
4374 #ifdef WLAN_FEATURE_FASTPATH
4375 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4376 {
4377 	qdf_nbuf_users_set(&nbuf->users, 1);
4378 	nbuf->data = nbuf->head + NET_SKB_PAD;
4379 	skb_reset_tail_pointer(nbuf);
4380 }
4381 qdf_export_symbol(qdf_nbuf_init_fast);
4382 #endif /* WLAN_FEATURE_FASTPATH */
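/*
 * Note: qdf_nbuf_init_fast() intentionally duplicates __qdf_nbuf_init()
 * rather than calling it, presumably to save a function call on the
 * fastpath; keep the two bodies in sync when modifying either one.
 */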
4383 
4384 
4385 #ifdef QDF_NBUF_GLOBAL_COUNT
/**
 * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
 *
 * Return: void
 */
4391 void __qdf_nbuf_mod_init(void)
4392 {
4393 	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR,
				  NULL, &nbuf_count);
4395 }
4396 
/**
 * __qdf_nbuf_mod_exit() - De-initialization routine for qdf_nbuf
 *
 * Return: void
 */
4402 void __qdf_nbuf_mod_exit(void)
4403 {
4404 }
4405 #endif
4406