xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: qdf_nbuf.c
30  * QCA driver framework (QDF) network buffer management APIs
31  */
32 
33 #include <linux/hashtable.h>
34 #include <linux/kernel.h>
35 #include <linux/version.h>
36 #include <linux/skbuff.h>
37 #include <linux/module.h>
38 #include <linux/proc_fs.h>
39 #include <qdf_atomic.h>
40 #include <qdf_types.h>
41 #include <qdf_nbuf.h>
42 #include <qdf_mem.h>
43 #include <qdf_status.h>
44 #include <qdf_lock.h>
45 #include <qdf_trace.h>
46 #include <qdf_debugfs.h>
47 #include <net/ieee80211_radiotap.h>
48 #include <qdf_module.h>
49 #include <qdf_atomic.h>
50 #include <pld_common.h>
51 #include <qdf_module.h>
52 
53 #if defined(FEATURE_TSO)
54 #include <net/ipv6.h>
55 #include <linux/ipv6.h>
56 #include <linux/tcp.h>
57 #include <linux/if_vlan.h>
58 #include <linux/ip.h>
59 #endif /* FEATURE_TSO */
60 
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
62 
63 #define qdf_nbuf_users_inc atomic_inc
64 #define qdf_nbuf_users_dec atomic_dec
65 #define qdf_nbuf_users_set atomic_set
66 #define qdf_nbuf_users_read atomic_read
67 #else
68 #define qdf_nbuf_users_inc refcount_inc
69 #define qdf_nbuf_users_dec refcount_dec
70 #define qdf_nbuf_users_set refcount_set
71 #define qdf_nbuf_users_read refcount_read
72 #endif /* KERNEL_VERSION(4, 13, 0) */
73 
74 #define IEEE80211_RADIOTAP_VHT_BW_20	0
75 #define IEEE80211_RADIOTAP_VHT_BW_40	1
76 #define IEEE80211_RADIOTAP_VHT_BW_80	2
77 #define IEEE80211_RADIOTAP_VHT_BW_160	3
78 
79 #define RADIOTAP_VHT_BW_20	0
80 #define RADIOTAP_VHT_BW_40	1
81 #define RADIOTAP_VHT_BW_80	4
82 #define RADIOTAP_VHT_BW_160	11
83 
84 /* channel number to freq conversion */
85 #define CHANNEL_NUM_14 14
86 #define CHANNEL_NUM_15 15
87 #define CHANNEL_NUM_27 27
88 #define CHANNEL_NUM_35 35
89 #define CHANNEL_NUM_182 182
90 #define CHANNEL_NUM_197 197
91 #define CHANNEL_FREQ_2484 2484
92 #define CHANNEL_FREQ_2407 2407
93 #define CHANNEL_FREQ_2512 2512
94 #define CHANNEL_FREQ_5000 5000
95 #define CHANNEL_FREQ_4000 4000
96 #define FREQ_MULTIPLIER_CONST_5MHZ 5
97 #define FREQ_MULTIPLIER_CONST_20MHZ 20
98 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
99 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
100 #define RADIOTAP_CCK_CHANNEL 0x0020
101 #define RADIOTAP_OFDM_CHANNEL 0x0040
102 
103 #ifdef CONFIG_MCL
104 #include <qdf_mc_timer.h>
105 
106 struct qdf_track_timer {
107 	qdf_mc_timer_t track_timer;
108 	qdf_atomic_t alloc_fail_cnt;
109 };
110 
111 static struct qdf_track_timer alloc_track_timer;
112 
113 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
114 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
115 #endif
116 
117 /* Packet Counter */
118 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
119 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
120 #ifdef QDF_NBUF_GLOBAL_COUNT
121 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
122 static qdf_atomic_t nbuf_count;
123 #endif
124 
125 /**
126  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
127  *
128  * Return: none
129  */
130 void qdf_nbuf_tx_desc_count_display(void)
131 {
132 	qdf_print("Current Snapshot of the Driver:\n");
133 	qdf_print("Data Packets:\n");
134 	qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
135 		nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
136 		(nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
137 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
138 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
139 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
140 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
141 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
142 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
143 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
144 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
145 	qdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
146 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
147 			nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
148 		nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
149 			 nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
150 		nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
151 			 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
152 		nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
153 	qdf_print("Mgmt Packets:\n");
154 	qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
155 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
156 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
157 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
158 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
159 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
160 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
161 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
162 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
163 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
164 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
165 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
166 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
167 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
168 }
169 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
170 
171 /**
172  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
173  * @packet_type: packet type, either mgmt or data
174  * @current_state: layer at which the packet is currently present
175  *
176  * Return: none
177  */
178 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
179 			uint8_t current_state)
180 {
181 	switch (packet_type) {
182 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
183 		nbuf_tx_mgmt[current_state]++;
184 		break;
185 	case QDF_NBUF_TX_PKT_DATA_TRACK:
186 		nbuf_tx_data[current_state]++;
187 		break;
188 	default:
189 		break;
190 	}
191 }
192 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
193 
194 /**
195  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt packets
196  *
197  * Return: none
198  */
199 void qdf_nbuf_tx_desc_count_clear(void)
200 {
201 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
202 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
203 }
204 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
205 
206 /**
207  * qdf_nbuf_set_state() - Updates the packet state
208  * @nbuf:            network buffer
209  * @current_state :  layer at which the packet currently is
210  *
211  * This function updates the packet state to the layer at which the packet
212  * currently is
213  *
214  * Return: none
215  */
216 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
217 {
218 	/*
219 	 * Only Mgmt, Data Packets are tracked. WMI messages
220 	 * such as scan commands are not tracked
221 	 */
222 	uint8_t packet_type;
223 
224 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
225 
226 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
227 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
228 		return;
229 	}
230 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
231 	qdf_nbuf_tx_desc_count_update(packet_type,
232 					current_state);
233 }
234 qdf_export_symbol(qdf_nbuf_set_state);
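
/*
 * Illustrative usage (not part of the original source): a TX layer would
 * typically mark the buffer as it hands it to the next layer, e.g.
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX);
 *	...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTT);
 *
 * qdf_nbuf_tx_desc_count_display() then reports how many buffers are
 * currently held at each layer by diffing the per-state counters above.
 */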
235 
236 #ifdef CONFIG_MCL
237 /**
238  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
239  *
240  * This function starts the alloc fail replenish timer.
241  *
242  * Return: void
243  */
244 static void __qdf_nbuf_start_replenish_timer(void)
245 {
246 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
247 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
248 	    QDF_TIMER_STATE_RUNNING)
249 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
250 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
251 }
252 
253 /**
254  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
255  *
256  * This function stops the alloc fail replenish timer.
257  *
258  * Return: void
259  */
260 static void __qdf_nbuf_stop_replenish_timer(void)
261 {
262 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
263 		return;
264 
265 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
266 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
267 	    QDF_TIMER_STATE_RUNNING)
268 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
269 }
270 
271 /**
272  * qdf_replenish_expire_handler - Replenish expire handler
273  *
274  * This function triggers when the alloc fail replenish timer expires.
275  *
276  * Return: void
277  */
278 static void qdf_replenish_expire_handler(void *arg)
279 {
280 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
281 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
282 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
283 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
284 
285 		/* Error handling here */
286 	}
287 }
288 
289 /**
290  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
291  *
292  * This function initializes the nbuf alloc fail replenish timer.
293  *
294  * Return: void
295  */
296 void __qdf_nbuf_init_replenish_timer(void)
297 {
298 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
299 			  qdf_replenish_expire_handler, NULL);
300 }
301 
302 /**
303  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
304  *
305  * This function deinitializes the nbuf alloc fail replenish timer.
306  *
307  * Return: void
308  */
309 void __qdf_nbuf_deinit_replenish_timer(void)
310 {
311 	__qdf_nbuf_stop_replenish_timer();
312 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
313 }
314 #else
315 
316 static inline void __qdf_nbuf_start_replenish_timer(void) {}
317 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
318 #endif
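
/*
 * Illustrative lifecycle (sketch, not part of the original source): driver
 * attach is assumed to call __qdf_nbuf_init_replenish_timer() once; each
 * failed allocation in __qdf_nbuf_alloc() calls
 * __qdf_nbuf_start_replenish_timer(), a successful fallback to the
 * pre-allocated pool calls __qdf_nbuf_stop_replenish_timer(), and driver
 * detach calls __qdf_nbuf_deinit_replenish_timer().
 */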
319 
320 /* globals do not need to be initialized to NULL/0 */
321 qdf_nbuf_trace_update_t qdf_trace_update_cb;
322 qdf_nbuf_free_t nbuf_free_cb;
323 
324 #ifdef QDF_NBUF_GLOBAL_COUNT
325 
326 /**
327  * __qdf_nbuf_count_get() - get nbuf global count
328  *
329  * Return: nbuf global count
330  */
331 int __qdf_nbuf_count_get(void)
332 {
333 	return qdf_atomic_read(&nbuf_count);
334 }
335 qdf_export_symbol(__qdf_nbuf_count_get);
336 
337 /**
338  * __qdf_nbuf_count_inc() - increment nbuf global count
339  *
340  * @nbuf: sk buff
341  *
342  * Return: void
343  */
344 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
345 {
346 	qdf_atomic_inc(&nbuf_count);
347 }
348 qdf_export_symbol(__qdf_nbuf_count_inc);
349 
350 /**
351  * __qdf_nbuf_count_dec() - decrement nbuf global count
352  *
353  * @nbuf: sk buff
354  *
355  * Return: void
356  */
357 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
358 {
359 	qdf_atomic_dec(&nbuf_count);
360 }
361 qdf_export_symbol(__qdf_nbuf_count_dec);
362 #endif
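
/*
 * Illustrative check (sketch, not part of the original source): the global
 * nbuf count can be polled to spot buffer leaks, e.g.
 *
 *	int pending = __qdf_nbuf_count_get();
 *
 *	if (pending)
 *		qdf_print("outstanding nbufs: %d", pending);
 *
 * The same counter is assumed to back the NBUF_DEBUGFS_NAME
 * ("nbuf_counters") debugfs entry defined above.
 */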
363 
364 
365 /**
366  * __qdf_nbuf_alloc() - Allocate nbuf
367  * @osdev: Device handle
368  * @size: Netbuf requested size
369  * @reserve: headroom to start with
370  * @align: alignment for the data pointer
371  * @prio: Priority
372  *
373  * This allocates an nbuf, aligns it if needed, and reserves some headroom at
374  * the front. Since the reserve is applied after alignment, an unaligned
375  * reserve value will leave the data pointer unaligned.
376  *
377  * Return: nbuf or %NULL if no memory
378  */
379 #if defined(QCA_WIFI_QCA8074) && defined (BUILD_X86)
380 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
381 			 int align, int prio)
382 {
383 	struct sk_buff *skb;
384 	unsigned long offset;
385 	uint32_t lowmem_alloc_tries = 0;
386 
387 	if (align)
388 		size += (align - 1);
389 
390 realloc:
391 	skb = dev_alloc_skb(size);
392 
393 	if (skb)
394 		goto skb_alloc;
395 
396 	skb = pld_nbuf_pre_alloc(size);
397 
398 	if (!skb) {
399 		pr_info("ERROR:NBUF alloc failed\n");
400 		return NULL;
401 	}
402 
403 skb_alloc:
404 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040.
405 	 * Though we are trying to reserve low memory upfront to prevent this,
406 	 * we sometimes see SKBs allocated from low memory.
407 	 */
408 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
409 		lowmem_alloc_tries++;
410 		if (lowmem_alloc_tries > 100) {
411 			qdf_print("%s failed\n", __func__);
412 			return NULL;
413 		} else {
414 			/* Not freeing to make sure it
415 			 * will not get allocated again
416 			 */
417 			goto realloc;
418 		}
419 	}
420 	memset(skb->cb, 0x0, sizeof(skb->cb));
421 
422 	/*
423 	 * The default is for netbuf fragments to be interpreted
424 	 * as wordstreams rather than bytestreams.
425 	 */
426 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
427 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
428 
429 	/*
430 	 * XXX:how about we reserve first then align
431 	 * Align & make sure that the tail & data are adjusted properly
432 	 */
433 
434 	if (align) {
435 		offset = ((unsigned long)skb->data) % align;
436 		if (offset)
437 			skb_reserve(skb, align - offset);
438 	}
439 
440 	/*
441 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
442 	 * pointer
443 	 */
444 	skb_reserve(skb, reserve);
445 	qdf_nbuf_count_inc(skb);
446 
447 	return skb;
448 }
449 #else
450 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
451 			 int align, int prio)
452 {
453 	struct sk_buff *skb;
454 	unsigned long offset;
455 	int flags = GFP_KERNEL;
456 
457 	if (align)
458 		size += (align - 1);
459 
460 	if (in_interrupt() || irqs_disabled() || in_atomic())
461 		flags = GFP_ATOMIC;
462 
463 	skb = __netdev_alloc_skb(NULL, size, flags);
464 
465 	if (skb)
466 		goto skb_alloc;
467 
468 	skb = pld_nbuf_pre_alloc(size);
469 
470 	if (!skb) {
471 		pr_err_ratelimited("ERROR:NBUF alloc failed, size = %zu\n",
472 				   size);
473 		__qdf_nbuf_start_replenish_timer();
474 		return NULL;
475 	} else {
476 		__qdf_nbuf_stop_replenish_timer();
477 	}
478 
479 skb_alloc:
480 	memset(skb->cb, 0x0, sizeof(skb->cb));
481 
482 	/*
483 	 * The default is for netbuf fragments to be interpreted
484 	 * as wordstreams rather than bytestreams.
485 	 */
486 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
487 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
488 
489 	/*
490 	 * XXX:how about we reserve first then align
491 	 * Align & make sure that the tail & data are adjusted properly
492 	 */
493 
494 	if (align) {
495 		offset = ((unsigned long)skb->data) % align;
496 		if (offset)
497 			skb_reserve(skb, align - offset);
498 	}
499 
500 	/*
501 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
502 	 * pointer
503 	 */
504 	skb_reserve(skb, reserve);
505 	qdf_nbuf_count_inc(skb);
506 
507 	return skb;
508 }
509 #endif
510 qdf_export_symbol(__qdf_nbuf_alloc);
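
/*
 * Illustrative allocation (sketch, not part of the original source):
 * requesting a 4-byte aligned buffer with 64 bytes of headroom, where
 * "osdev" is the caller's device handle:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *
 * The data pointer is aligned to 4 bytes first, then skb_reserve() moves it
 * forward by 64 bytes; since 64 is a multiple of 4 the pointer stays
 * aligned, whereas an unaligned reserve value would leave it unaligned.
 */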
511 
512 /**
513  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
514  * @skb: Pointer to network buffer
515  *
516  * Return: none
517  */
518 
519 #ifdef CONFIG_MCL
520 void __qdf_nbuf_free(struct sk_buff *skb)
521 {
522 	if (pld_nbuf_pre_alloc_free(skb))
523 		return;
524 
525 	qdf_nbuf_count_dec(skb);
526 	if (nbuf_free_cb)
527 		nbuf_free_cb(skb);
528 	else
529 		dev_kfree_skb_any(skb);
530 }
531 #else
532 void __qdf_nbuf_free(struct sk_buff *skb)
533 {
534 	if (pld_nbuf_pre_alloc_free(skb))
535 		return;
536 
537 	qdf_nbuf_count_dec(skb);
538 	dev_kfree_skb_any(skb);
539 }
540 #endif
541 
542 qdf_export_symbol(__qdf_nbuf_free);
543 
544 #ifdef NBUF_MEMORY_DEBUG
545 enum qdf_nbuf_event_type {
546 	QDF_NBUF_ALLOC,
547 	QDF_NBUF_FREE,
548 	QDF_NBUF_MAP,
549 	QDF_NBUF_UNMAP,
550 };
551 
552 struct qdf_nbuf_event {
553 	qdf_nbuf_t nbuf;
554 	const char *file;
555 	uint32_t line;
556 	enum qdf_nbuf_event_type type;
557 	uint64_t timestamp;
558 };
559 
560 #define QDF_NBUF_HISTORY_SIZE 4096
561 static qdf_atomic_t qdf_nbuf_history_index;
562 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
563 
564 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
565 {
566 	int32_t next = qdf_atomic_inc_return(index);
567 
568 	if (next == size)
569 		qdf_atomic_sub(size, index);
570 
571 	return next % size;
572 }
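
/*
 * Worked example (illustrative): with a size of QDF_NBUF_HISTORY_SIZE
 * (4096), the atomic increment returns 1, 2, ..., 4095 for the first
 * entries; when it returns 4096 the size is subtracted from the index and
 * 4096 % 4096 maps to slot 0, so the history ring wraps around and starts
 * overwriting the oldest events.
 */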
573 
574 static void
575 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
576 		     enum qdf_nbuf_event_type type)
577 {
578 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
579 						   QDF_NBUF_HISTORY_SIZE);
580 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
581 
582 	event->nbuf = nbuf;
583 	event->file = file;
584 	event->line = line;
585 	event->type = type;
586 	event->timestamp = qdf_get_log_timestamp();
587 }
588 
589 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
590 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
591 static qdf_spinlock_t qdf_nbuf_map_lock;
592 
593 struct qdf_nbuf_map_metadata {
594 	struct hlist_node node;
595 	qdf_nbuf_t nbuf;
596 	const char *file;
597 	uint32_t line;
598 };
599 
600 static void qdf_nbuf_map_tracking_init(void)
601 {
602 	hash_init(qdf_nbuf_map_ht);
603 	qdf_spinlock_create(&qdf_nbuf_map_lock);
604 }
605 
606 void qdf_nbuf_map_check_for_leaks(void)
607 {
608 	struct qdf_nbuf_map_metadata *meta;
609 	int bucket;
610 	uint32_t count = 0;
611 	bool is_empty;
612 
613 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
614 	is_empty = hash_empty(qdf_nbuf_map_ht);
615 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
616 
617 	if (is_empty)
618 		return;
619 
620 	qdf_err("Nbuf map without unmap events detected!");
621 	qdf_err("------------------------------------------------------------");
622 
623 	/* Hold the lock for the entire iteration for safe list/meta access. We
624 	 * are explicitly preferring the chance to watchdog on the print, over
625 	 * the posibility of invalid list/memory access. Since we are going to
626 	 * panic anyway, the worst case is loading up the crash dump to find out
627 	 * what was in the hash table.
628 	 */
629 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
630 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
631 		count++;
632 		qdf_err("0x%pK @ %s:%u",
633 			meta->nbuf, kbasename(meta->file), meta->line);
634 	}
635 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
636 
637 	panic("%u fatal nbuf map without unmap events detected!", count);
638 }
639 
640 static void qdf_nbuf_map_tracking_deinit(void)
641 {
642 	qdf_nbuf_map_check_for_leaks();
643 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
644 }
645 
646 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
647 {
648 	struct qdf_nbuf_map_metadata *meta;
649 
650 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
651 		if (meta->nbuf == nbuf)
652 			return meta;
653 	}
654 
655 	return NULL;
656 }
657 
658 static QDF_STATUS
659 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
660 {
661 	struct qdf_nbuf_map_metadata *meta;
662 
663 	QDF_BUG(nbuf);
664 	if (!nbuf) {
665 		qdf_err("Cannot map null nbuf");
666 		return QDF_STATUS_E_INVAL;
667 	}
668 
669 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
670 	meta = qdf_nbuf_meta_get(nbuf);
671 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
672 	if (meta)
673 		panic("Double nbuf map detected @ %s:%u",
674 		      kbasename(file), line);
675 
676 	meta = qdf_mem_malloc(sizeof(*meta));
677 	if (!meta) {
678 		qdf_err("Failed to allocate nbuf map tracking metadata");
679 		return QDF_STATUS_E_NOMEM;
680 	}
681 
682 	meta->nbuf = nbuf;
683 	meta->file = file;
684 	meta->line = line;
685 
686 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
687 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
688 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
689 
690 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
691 
692 	return QDF_STATUS_SUCCESS;
693 }
694 
695 static void
696 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
697 {
698 	struct qdf_nbuf_map_metadata *meta;
699 
700 	QDF_BUG(nbuf);
701 	if (!nbuf) {
702 		qdf_err("Cannot unmap null nbuf");
703 		return;
704 	}
705 
706 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
707 	meta = qdf_nbuf_meta_get(nbuf);
708 
709 	if (!meta)
710 		panic("Double nbuf unmap or unmap without map detected @%s:%u",
711 		      kbasename(file), line);
712 
713 	hash_del(&meta->node);
714 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
715 
716 	qdf_mem_free(meta);
717 
718 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
719 }
720 
721 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
722 			      qdf_nbuf_t buf,
723 			      qdf_dma_dir_t dir,
724 			      const char *file,
725 			      uint32_t line)
726 {
727 	QDF_STATUS status;
728 
729 	status = qdf_nbuf_track_map(buf, file, line);
730 	if (QDF_IS_STATUS_ERROR(status))
731 		return status;
732 
733 	status = __qdf_nbuf_map(osdev, buf, dir);
734 	if (QDF_IS_STATUS_ERROR(status))
735 		qdf_nbuf_untrack_map(buf, file, line);
736 
737 	return status;
738 }
739 
740 qdf_export_symbol(qdf_nbuf_map_debug);
741 
742 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
743 			  qdf_nbuf_t buf,
744 			  qdf_dma_dir_t dir,
745 			  const char *file,
746 			  uint32_t line)
747 {
748 	qdf_nbuf_untrack_map(buf, file, line);
749 	__qdf_nbuf_unmap_single(osdev, buf, dir);
750 }
751 
752 qdf_export_symbol(qdf_nbuf_unmap_debug);
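
/*
 * Illustrative pairing (sketch, not part of the original source): with
 * NBUF_MEMORY_DEBUG enabled every map must be matched by exactly one unmap,
 * otherwise the tracker panics:
 *
 *	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				    __FILE__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...
 *	qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *			     __FILE__, __LINE__);
 *
 * Mapping the same nbuf twice triggers the "Double nbuf map" panic, and any
 * entry still in the hash table at teardown is reported by
 * qdf_nbuf_map_check_for_leaks().
 */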
753 
754 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
755 				     qdf_nbuf_t buf,
756 				     qdf_dma_dir_t dir,
757 				     const char *file,
758 				     uint32_t line)
759 {
760 	QDF_STATUS status;
761 
762 	status = qdf_nbuf_track_map(buf, file, line);
763 	if (QDF_IS_STATUS_ERROR(status))
764 		return status;
765 
766 	status = __qdf_nbuf_map_single(osdev, buf, dir);
767 	if (QDF_IS_STATUS_ERROR(status))
768 		qdf_nbuf_untrack_map(buf, file, line);
769 
770 	return status;
771 }
772 
773 qdf_export_symbol(qdf_nbuf_map_single_debug);
774 
775 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
776 				 qdf_nbuf_t buf,
777 				 qdf_dma_dir_t dir,
778 				 const char *file,
779 				 uint32_t line)
780 {
781 	qdf_nbuf_untrack_map(buf, file, line);
782 	__qdf_nbuf_unmap_single(osdev, buf, dir);
783 }
784 
785 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
786 
787 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
788 				     qdf_nbuf_t buf,
789 				     qdf_dma_dir_t dir,
790 				     int nbytes,
791 				     const char *file,
792 				     uint32_t line)
793 {
794 	QDF_STATUS status;
795 
796 	status = qdf_nbuf_track_map(buf, file, line);
797 	if (QDF_IS_STATUS_ERROR(status))
798 		return status;
799 
800 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
801 	if (QDF_IS_STATUS_ERROR(status))
802 		qdf_nbuf_untrack_map(buf, file, line);
803 
804 	return status;
805 }
806 
807 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
808 
809 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
810 				 qdf_nbuf_t buf,
811 				 qdf_dma_dir_t dir,
812 				 int nbytes,
813 				 const char *file,
814 				 uint32_t line)
815 {
816 	qdf_nbuf_untrack_map(buf, file, line);
817 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
818 }
819 
820 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
821 
822 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
823 					    qdf_nbuf_t buf,
824 					    qdf_dma_dir_t dir,
825 					    int nbytes,
826 					    const char *file,
827 					    uint32_t line)
828 {
829 	QDF_STATUS status;
830 
831 	status = qdf_nbuf_track_map(buf, file, line);
832 	if (QDF_IS_STATUS_ERROR(status))
833 		return status;
834 
835 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
836 	if (QDF_IS_STATUS_ERROR(status))
837 		qdf_nbuf_untrack_map(buf, file, line);
838 
839 	return status;
840 }
841 
842 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
843 
844 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
845 					qdf_nbuf_t buf,
846 					qdf_dma_dir_t dir,
847 					int nbytes,
848 					const char *file,
849 					uint32_t line)
850 {
851 	qdf_nbuf_untrack_map(buf, file, line);
852 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
853 }
854 
855 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
856 #endif /* NBUF_MEMORY_DEBUG */
857 
858 /**
859  * __qdf_nbuf_map() - map a buffer to local bus address space
860  * @osdev: OS device
862  * @skb: Pointer to network buffer
863  * @dir: Direction
864  *
865  * Return: QDF_STATUS
866  */
867 #ifdef QDF_OS_DEBUG
868 QDF_STATUS
869 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
870 {
871 	struct skb_shared_info *sh = skb_shinfo(skb);
872 
873 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
874 			|| (dir == QDF_DMA_FROM_DEVICE));
875 
876 	/*
877 	 * Assume there's only a single fragment.
878 	 * To support multiple fragments, it would be necessary to change
879 	 * qdf_nbuf_t to be a separate object that stores meta-info
880 	 * (including the bus address for each fragment) and a pointer
881 	 * to the underlying sk_buff.
882 	 */
883 	qdf_assert(sh->nr_frags == 0);
884 
885 	return __qdf_nbuf_map_single(osdev, skb, dir);
886 }
887 qdf_export_symbol(__qdf_nbuf_map);
888 
889 #else
890 QDF_STATUS
891 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
892 {
893 	return __qdf_nbuf_map_single(osdev, skb, dir);
894 }
895 qdf_export_symbol(__qdf_nbuf_map);
896 #endif
897 /**
898  * __qdf_nbuf_unmap() - unmap a previously mapped buf
899  * @osdev: OS device
900  * @skb: Pointer to network buffer
901  * @dir: dma direction
902  *
903  * Return: none
904  */
905 void
906 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
907 			qdf_dma_dir_t dir)
908 {
909 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
910 		   || (dir == QDF_DMA_FROM_DEVICE));
911 
912 	/*
913 	 * Assume there's a single fragment.
914 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
915 	 */
916 	__qdf_nbuf_unmap_single(osdev, skb, dir);
917 }
918 qdf_export_symbol(__qdf_nbuf_unmap);
919 
920 /**
921  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
922  * @osdev: OS device
923  * @skb: Pointer to network buffer
924  * @dir: Direction
925  *
926  * Return: QDF_STATUS
927  */
928 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
929 QDF_STATUS
930 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
931 {
932 	qdf_dma_addr_t paddr;
933 
934 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
935 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
936 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
937 	return QDF_STATUS_SUCCESS;
938 }
939 qdf_export_symbol(__qdf_nbuf_map_single);
940 #else
941 QDF_STATUS
942 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
943 {
944 	qdf_dma_addr_t paddr;
945 
946 	/* assume that the OS only provides a single fragment */
947 	QDF_NBUF_CB_PADDR(buf) = paddr =
948 		dma_map_single(osdev->dev, buf->data,
949 				skb_end_pointer(buf) - buf->data,
950 				__qdf_dma_dir_to_os(dir));
951 	return dma_mapping_error(osdev->dev, paddr)
952 		? QDF_STATUS_E_FAILURE
953 		: QDF_STATUS_SUCCESS;
954 }
955 qdf_export_symbol(__qdf_nbuf_map_single);
956 #endif
957 /**
958  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
959  * @osdev: OS device
960  * @skb: Pointer to network buffer
961  * @dir: Direction
962  *
963  * Return: none
964  */
965 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
966 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
967 				qdf_dma_dir_t dir)
968 {
969 }
970 #else
971 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
972 					qdf_dma_dir_t dir)
973 {
974 	if (QDF_NBUF_CB_PADDR(buf))
975 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
976 			skb_end_pointer(buf) - buf->data,
977 			__qdf_dma_dir_to_os(dir));
978 }
979 #endif
980 qdf_export_symbol(__qdf_nbuf_unmap_single);
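
/*
 * Illustrative DMA pairing (sketch, not part of the original source): on
 * builds with a real DMA path this resolves to dma_map_single() /
 * dma_unmap_single() over the skb linear data:
 *
 *	if (__qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	paddr = QDF_NBUF_CB_PADDR(nbuf);
 *	... hand paddr to the hardware, then on completion ...
 *	__qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 *
 * On A_SIMOS_DEVHOST/HIF_USB/HIF_SDIO builds the "map" only records the
 * virtual address, so the corresponding unmap is a no-op.
 */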
981 
982 /**
983  * __qdf_nbuf_set_rx_cksum() - set rx checksum
984  * @skb: Pointer to network buffer
985  * @cksum: Pointer to checksum value
986  *
987  * Return: QDF_STATUS
988  */
989 QDF_STATUS
990 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
991 {
992 	switch (cksum->l4_result) {
993 	case QDF_NBUF_RX_CKSUM_NONE:
994 		skb->ip_summed = CHECKSUM_NONE;
995 		break;
996 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
997 		skb->ip_summed = CHECKSUM_UNNECESSARY;
998 		break;
999 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1000 		skb->ip_summed = CHECKSUM_PARTIAL;
1001 		skb->csum = cksum->val;
1002 		break;
1003 	default:
1004 		pr_err("Unknown checksum type\n");
1005 		qdf_assert(0);
1006 		return QDF_STATUS_E_NOSUPPORT;
1007 	}
1008 	return QDF_STATUS_SUCCESS;
1009 }
1010 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
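
/*
 * Illustrative usage (sketch, not part of the original source): an RX
 * completion path that learned from the hardware descriptor that the L4
 * checksum was already verified could report it as:
 *
 *	qdf_nbuf_rx_cksum_t cksum = {0};
 *
 *	cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 *
 * which maps to CHECKSUM_UNNECESSARY, so the network stack skips its own
 * checksum validation for this skb.
 */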
1011 
1012 /**
1013  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1014  * @skb: Pointer to network buffer
1015  *
1016  * Return: TX checksum value
1017  */
1018 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1019 {
1020 	switch (skb->ip_summed) {
1021 	case CHECKSUM_NONE:
1022 		return QDF_NBUF_TX_CKSUM_NONE;
1023 	case CHECKSUM_PARTIAL:
1024 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1025 	case CHECKSUM_COMPLETE:
1026 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1027 	default:
1028 		return QDF_NBUF_TX_CKSUM_NONE;
1029 	}
1030 }
1031 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1032 
1033 /**
1034  * __qdf_nbuf_get_tid() - get tid
1035  * @skb: Pointer to network buffer
1036  *
1037  * Return: tid
1038  */
1039 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1040 {
1041 	return skb->priority;
1042 }
1043 qdf_export_symbol(__qdf_nbuf_get_tid);
1044 
1045 /**
1046  * __qdf_nbuf_set_tid() - set tid
1047  * @skb: Pointer to network buffer
 * @tid: TID value to set
1048  *
1049  * Return: none
1050  */
1051 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1052 {
1053 	skb->priority = tid;
1054 }
1055 qdf_export_symbol(__qdf_nbuf_set_tid);
1056 
1057 /**
1058  * __qdf_nbuf_get_exemption_type() - get exemption type
1059  * @skb: Pointer to network buffer
1060  *
1061  * Return: exemption type
1062  */
1063 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1064 {
1065 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1066 }
1067 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1068 
1069 /**
1070  * __qdf_nbuf_reg_trace_cb() - register trace callback
1071  * @cb_func_ptr: Pointer to trace callback function
1072  *
1073  * Return: none
1074  */
1075 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1076 {
1077 	qdf_trace_update_cb = cb_func_ptr;
1078 }
1079 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1080 
1081 /**
1082  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1083  *              of DHCP packet.
1084  * @data: Pointer to DHCP packet data buffer
1085  *
1086  * This func. returns the subtype of DHCP packet.
1087  *
1088  * Return: subtype of the DHCP packet.
1089  */
1090 enum qdf_proto_subtype
1091 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1092 {
1093 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1094 
1095 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1096 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1097 					QDF_DHCP_OPTION53_LENGTH)) {
1098 
1099 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1100 		case QDF_DHCP_DISCOVER:
1101 			subtype = QDF_PROTO_DHCP_DISCOVER;
1102 			break;
1103 		case QDF_DHCP_REQUEST:
1104 			subtype = QDF_PROTO_DHCP_REQUEST;
1105 			break;
1106 		case QDF_DHCP_OFFER:
1107 			subtype = QDF_PROTO_DHCP_OFFER;
1108 			break;
1109 		case QDF_DHCP_ACK:
1110 			subtype = QDF_PROTO_DHCP_ACK;
1111 			break;
1112 		case QDF_DHCP_NAK:
1113 			subtype = QDF_PROTO_DHCP_NACK;
1114 			break;
1115 		case QDF_DHCP_RELEASE:
1116 			subtype = QDF_PROTO_DHCP_RELEASE;
1117 			break;
1118 		case QDF_DHCP_INFORM:
1119 			subtype = QDF_PROTO_DHCP_INFORM;
1120 			break;
1121 		case QDF_DHCP_DECLINE:
1122 			subtype = QDF_PROTO_DHCP_DECLINE;
1123 			break;
1124 		default:
1125 			break;
1126 		}
1127 	}
1128 
1129 	return subtype;
1130 }
1131 
1132 /**
1133  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1134  *            of EAPOL packet.
1135  * @data: Pointer to EAPOL packet data buffer
1136  *
1137  * This func. returns the subtype of EAPOL packet.
1138  *
1139  * Return: subtype of the EAPOL packet.
1140  */
1141 enum qdf_proto_subtype
1142 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1143 {
1144 	uint16_t eapol_key_info;
1145 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1146 	uint16_t mask;
1147 
1148 	eapol_key_info = (uint16_t)(*(uint16_t *)
1149 			(data + EAPOL_KEY_INFO_OFFSET));
1150 
1151 	mask = eapol_key_info & EAPOL_MASK;
1152 	switch (mask) {
1153 	case EAPOL_M1_BIT_MASK:
1154 		subtype = QDF_PROTO_EAPOL_M1;
1155 		break;
1156 	case EAPOL_M2_BIT_MASK:
1157 		subtype = QDF_PROTO_EAPOL_M2;
1158 		break;
1159 	case EAPOL_M3_BIT_MASK:
1160 		subtype = QDF_PROTO_EAPOL_M3;
1161 		break;
1162 	case EAPOL_M4_BIT_MASK:
1163 		subtype = QDF_PROTO_EAPOL_M4;
1164 		break;
1165 	default:
1166 		break;
1167 	}
1168 
1169 	return subtype;
1170 }
1171 
1172 /**
1173  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1174  *            of ARP packet.
1175  * @data: Pointer to ARP packet data buffer
1176  *
1177  * This func. returns the subtype of ARP packet.
1178  *
1179  * Return: subtype of the ARP packet.
1180  */
1181 enum qdf_proto_subtype
1182 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1183 {
1184 	uint16_t subtype;
1185 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1186 
1187 	subtype = (uint16_t)(*(uint16_t *)
1188 			(data + ARP_SUB_TYPE_OFFSET));
1189 
1190 	switch (QDF_SWAP_U16(subtype)) {
1191 	case ARP_REQUEST:
1192 		proto_subtype = QDF_PROTO_ARP_REQ;
1193 		break;
1194 	case ARP_RESPONSE:
1195 		proto_subtype = QDF_PROTO_ARP_RES;
1196 		break;
1197 	default:
1198 		break;
1199 	}
1200 
1201 	return proto_subtype;
1202 }
1203 
1204 /**
1205  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1206  *            of IPV4 ICMP packet.
1207  * @data: Pointer to IPV4 ICMP packet data buffer
1208  *
1209  * This func. returns the subtype of ICMP packet.
1210  *
1211  * Return: subtype of the ICMP packet.
1212  */
1213 enum qdf_proto_subtype
1214 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1215 {
1216 	uint8_t subtype;
1217 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1218 
1219 	subtype = (uint8_t)(*(uint8_t *)
1220 			(data + ICMP_SUBTYPE_OFFSET));
1221 
1222 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
1223 		"ICMP proto type: 0x%02x", subtype);
1224 
1225 	switch (subtype) {
1226 	case ICMP_REQUEST:
1227 		proto_subtype = QDF_PROTO_ICMP_REQ;
1228 		break;
1229 	case ICMP_RESPONSE:
1230 		proto_subtype = QDF_PROTO_ICMP_RES;
1231 		break;
1232 	default:
1233 		break;
1234 	}
1235 
1236 	return proto_subtype;
1237 }
1238 
1239 /**
1240  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1241  *            of IPV6 ICMPV6 packet.
1242  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1243  *
1244  * This func. returns the subtype of ICMPV6 packet.
1245  *
1246  * Return: subtype of the ICMPV6 packet.
1247  */
1248 enum qdf_proto_subtype
1249 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1250 {
1251 	uint8_t subtype;
1252 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1253 
1254 	subtype = (uint8_t)(*(uint8_t *)
1255 			(data + ICMPV6_SUBTYPE_OFFSET));
1256 
1257 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
1258 		"ICMPv6 proto type: 0x%02x", subtype);
1259 
1260 	switch (subtype) {
1261 	case ICMPV6_REQUEST:
1262 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1263 		break;
1264 	case ICMPV6_RESPONSE:
1265 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1266 		break;
1267 	case ICMPV6_RS:
1268 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1269 		break;
1270 	case ICMPV6_RA:
1271 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1272 		break;
1273 	case ICMPV6_NS:
1274 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1275 		break;
1276 	case ICMPV6_NA:
1277 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1278 		break;
1279 	default:
1280 		break;
1281 	}
1282 
1283 	return proto_subtype;
1284 }
1285 
1286 /**
1287  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1288  *            of IPV4 packet.
1289  * @data: Pointer to IPV4 packet data buffer
1290  *
1291  * This func. returns the proto type of IPV4 packet.
1292  *
1293  * Return: proto type of IPV4 packet.
1294  */
1295 uint8_t
1296 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1297 {
1298 	uint8_t proto_type;
1299 
1300 	proto_type = (uint8_t)(*(uint8_t *)(data +
1301 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1302 	return proto_type;
1303 }
1304 
1305 /**
1306  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1307  *            of IPV6 packet.
1308  * @data: Pointer to IPV6 packet data buffer
1309  *
1310  * This func. returns the proto type of IPV6 packet.
1311  *
1312  * Return: proto type of IPV6 packet.
1313  */
1314 uint8_t
1315 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1316 {
1317 	uint8_t proto_type;
1318 
1319 	proto_type = (uint8_t)(*(uint8_t *)(data +
1320 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1321 	return proto_type;
1322 }
1323 
1324 /**
1325  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1326  * @data: Pointer to network data
1327  *
1328  * This api is for Tx packets.
1329  *
1330  * Return: true if packet is ipv4 packet
1331  *	   false otherwise
1332  */
1333 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1334 {
1335 	uint16_t ether_type;
1336 
1337 	ether_type = (uint16_t)(*(uint16_t *)(data +
1338 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1339 
1340 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1341 		return true;
1342 	else
1343 		return false;
1344 }
1345 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1346 
1347 /**
1348  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1349  * @data: Pointer to network data buffer
1350  *
1351  * This api is for ipv4 packet.
1352  *
1353  * Return: true if packet is DHCP packet
1354  *	   false otherwise
1355  */
1356 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1357 {
1358 	uint16_t sport;
1359 	uint16_t dport;
1360 
1361 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1362 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1363 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1364 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1365 					 sizeof(uint16_t)));
1366 
1367 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1368 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1369 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1370 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1371 		return true;
1372 	else
1373 		return false;
1374 }
1375 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
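
/*
 * Illustrative classification chain (sketch, not part of the original
 * source): callers typically narrow a frame down before decoding it, e.g.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv4_pkt(data) &&
 *	    __qdf_nbuf_data_is_ipv4_dhcp_pkt(data)) {
 *		enum qdf_proto_subtype sub =
 *			__qdf_nbuf_data_get_dhcp_subtype(data);
 *		... e.g. QDF_PROTO_DHCP_DISCOVER or QDF_PROTO_DHCP_ACK ...
 *	}
 *
 * These helpers read fixed offsets from the start of the frame, i.e. they
 * assume an Ethernet II header at the beginning of the data buffer.
 */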
1376 
1377 /**
1378  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1379  * @data: Pointer to network data buffer
1380  *
1381  * This api is for ipv4 packet.
1382  *
1383  * Return: true if packet is EAPOL packet
1384  *	   false otherwise.
1385  */
1386 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1387 {
1388 	uint16_t ether_type;
1389 
1390 	ether_type = (uint16_t)(*(uint16_t *)(data +
1391 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1392 
1393 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1394 		return true;
1395 	else
1396 		return false;
1397 }
1398 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1399 
1400 /**
1401  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1402  * @skb: Pointer to network buffer
1403  *
1404  * This api is for ipv4 packet.
1405  *
1406  * Return: true if packet is WAPI packet
1407  *	   false otherwise.
1408  */
1409 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1410 {
1411 	uint16_t ether_type;
1412 
1413 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1414 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1415 
1416 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1417 		return true;
1418 	else
1419 		return false;
1420 }
1421 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1422 
1423 /**
1424  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1425  * @skb: Pointer to network buffer
1426  *
1427  * This api is for ipv4 packet.
1428  *
1429  * Return: true if packet is tdls packet
1430  *	   false otherwise.
1431  */
1432 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1433 {
1434 	uint16_t ether_type;
1435 
1436 	ether_type = *(uint16_t *)(skb->data +
1437 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1438 
1439 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1440 		return true;
1441 	else
1442 		return false;
1443 }
1444 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1445 
1446 /**
1447  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1448  * @data: Pointer to network data buffer
1449  *
1450  * This api is for ipv4 packet.
1451  *
1452  * Return: true if packet is ARP packet
1453  *	   false otherwise.
1454  */
1455 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1456 {
1457 	uint16_t ether_type;
1458 
1459 	ether_type = (uint16_t)(*(uint16_t *)(data +
1460 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1461 
1462 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1463 		return true;
1464 	else
1465 		return false;
1466 }
1467 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1468 
1469 /**
1470  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1471  * @data: Pointer to network data buffer
1472  *
1473  * This api is for ipv4 packet.
1474  *
1475  * Return: true if packet is ARP request
1476  *	   false otherwise.
1477  */
1478 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1479 {
1480 	uint16_t op_code;
1481 
1482 	op_code = (uint16_t)(*(uint16_t *)(data +
1483 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1484 
1485 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1486 		return true;
1487 	return false;
1488 }
1489 
1490 /**
1491  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1492  * @data: Pointer to network data buffer
1493  *
1494  * This api is for ipv4 packet.
1495  *
1496  * Return: true if packet is ARP response
1497  *	   false otherwise.
1498  */
1499 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1500 {
1501 	uint16_t op_code;
1502 
1503 	op_code = (uint16_t)(*(uint16_t *)(data +
1504 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1505 
1506 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1507 		return true;
1508 	return false;
1509 }
1510 
1511 /**
1512  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1513  * @data: Pointer to network data buffer
1514  *
1515  * This api is for ipv4 packet.
1516  *
1517  * Return: ARP packet source IP value.
1518  */
1519 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1520 {
1521 	uint32_t src_ip;
1522 
1523 	src_ip = (uint32_t)(*(uint32_t *)(data +
1524 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1525 
1526 	return src_ip;
1527 }
1528 
1529 /**
1530  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1531  * @data: Pointer to network data buffer
1532  *
1533  * This api is for ipv4 packet.
1534  *
1535  * Return: ARP packet target IP value.
1536  */
1537 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1538 {
1539 	uint32_t tgt_ip;
1540 
1541 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1542 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1543 
1544 	return tgt_ip;
1545 }
1546 
1547 /**
1548  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1549  * @data: Pointer to network data buffer
1550  * @len: length to copy
1551  *
1552  * This api is for dns domain name
1553  *
1554  * Return: dns domain name.
1555  */
1556 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1557 {
1558 	uint8_t *domain_name;
1559 
1560 	domain_name = (uint8_t *)
1561 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1562 	return domain_name;
1563 }
1564 
1565 
1566 /**
1567  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1568  * @data: Pointer to network data buffer
1569  *
1570  * This api is for dns query packet.
1571  *
1572  * Return: true if packet is dns query packet.
1573  *	   false otherwise.
1574  */
1575 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1576 {
1577 	uint16_t op_code;
1578 	uint16_t tgt_port;
1579 
1580 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1581 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1582 	/* Standard DNS queries always use destination port 53. */
1583 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1584 		op_code = (uint16_t)(*(uint16_t *)(data +
1585 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1586 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1587 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1588 			return true;
1589 	}
1590 	return false;
1591 }
1592 
1593 /**
1594  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1595  * @data: Pointer to network data buffer
1596  *
1597  * This api is for dns query response.
1598  *
1599  * Return: true if packet is dns response packet.
1600  *	   false otherwise.
1601  */
1602 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1603 {
1604 	uint16_t op_code;
1605 	uint16_t src_port;
1606 
1607 	src_port = (uint16_t)(*(uint16_t *)(data +
1608 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1609 	/* Standard DNS responses always come from source port 53. */
1610 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1611 		op_code = (uint16_t)(*(uint16_t *)(data +
1612 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1613 
1614 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1615 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1616 			return true;
1617 	}
1618 	return false;
1619 }
1620 
1621 /**
1622  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1623  * @data: Pointer to network data buffer
1624  *
1625  * This api is for tcp syn packet.
1626  *
1627  * Return: true if packet is tcp syn packet.
1628  *	   false otherwise.
1629  */
1630 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1631 {
1632 	uint8_t op_code;
1633 
1634 	op_code = (uint8_t)(*(uint8_t *)(data +
1635 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1636 
1637 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1638 		return true;
1639 	return false;
1640 }
1641 
1642 /**
1643  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1644  * @data: Pointer to network data buffer
1645  *
1646  * This api is for tcp syn ack packet.
1647  *
1648  * Return: true if packet is tcp syn ack packet.
1649  *	   false otherwise.
1650  */
1651 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1652 {
1653 	uint8_t op_code;
1654 
1655 	op_code = (uint8_t)(*(uint8_t *)(data +
1656 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1657 
1658 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1659 		return true;
1660 	return false;
1661 }
1662 
1663 /**
1664  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1665  * @data: Pointer to network data buffer
1666  *
1667  * This api is for tcp ack packet.
1668  *
1669  * Return: true if packet is tcp ack packet.
1670  *	   false otherwise.
1671  */
1672 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1673 {
1674 	uint8_t op_code;
1675 
1676 	op_code = (uint8_t)(*(uint8_t *)(data +
1677 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1678 
1679 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1680 		return true;
1681 	return false;
1682 }
1683 
1684 /**
1685  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1686  * @data: Pointer to network data buffer
1687  *
1688  * This api is for tcp packet.
1689  *
1690  * Return: tcp source port value.
1691  */
1692 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1693 {
1694 	uint16_t src_port;
1695 
1696 	src_port = (uint16_t)(*(uint16_t *)(data +
1697 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1698 
1699 	return src_port;
1700 }
1701 
1702 /**
1703  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1704  * @data: Pointer to network data buffer
1705  *
1706  * This api is for tcp packet.
1707  *
1708  * Return: tcp destination port value.
1709  */
1710 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1711 {
1712 	uint16_t tgt_port;
1713 
1714 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1715 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1716 
1717 	return tgt_port;
1718 }
1719 
1720 /**
1721  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1722  * @data: Pointer to network data buffer
1723  *
1724  * This api is for ipv4 req packet.
1725  *
1726  * Return: true if packet is icmpv4 request
1727  *	   false otherwise.
1728  */
1729 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1730 {
1731 	uint8_t op_code;
1732 
1733 	op_code = (uint8_t)(*(uint8_t *)(data +
1734 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1735 
1736 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1737 		return true;
1738 	return false;
1739 }
1740 
1741 /**
1742  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1743  * @data: Pointer to network data buffer
1744  *
1745  * This api is for ipv4 res packet.
1746  *
1747  * Return: true if packet is icmpv4 response
1748  *	   false otherwise.
1749  */
1750 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1751 {
1752 	uint8_t op_code;
1753 
1754 	op_code = (uint8_t)(*(uint8_t *)(data +
1755 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1756 
1757 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1758 		return true;
1759 	return false;
1760 }
1761 
1762 /**
1763  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1764  * @data: Pointer to network data buffer
1765  *
1766  * This api is for ipv4 packet.
1767  *
1768  * Return: icmpv4 packet source IP value.
1769  */
1770 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1771 {
1772 	uint32_t src_ip;
1773 
1774 	src_ip = (uint32_t)(*(uint32_t *)(data +
1775 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1776 
1777 	return src_ip;
1778 }
1779 
1780 /**
1781  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1782  * @data: Pointer to network data buffer
1783  *
1784  * This api is for ipv4 packet.
1785  *
1786  * Return: icmpv4 packet target IP value.
1787  */
1788 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1789 {
1790 	uint32_t tgt_ip;
1791 
1792 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1793 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1794 
1795 	return tgt_ip;
1796 }
1797 
1798 
1799 /**
1800  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1801  * @data: Pointer to IPV6 packet data buffer
1802  *
1803  * This func. checks whether it is a IPV6 packet or not.
1804  *
1805  * Return: TRUE if it is a IPV6 packet
1806  *         FALSE if not
1807  */
1808 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1809 {
1810 	uint16_t ether_type;
1811 
1812 	ether_type = (uint16_t)(*(uint16_t *)(data +
1813 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1814 
1815 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1816 		return true;
1817 	else
1818 		return false;
1819 }
1820 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1821 
1822 /**
1823  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1824  * @data: Pointer to network data buffer
1825  *
1826  * This api is for ipv6 packet.
1827  *
1828  * Return: true if packet is DHCP packet
1829  *	   false otherwise
1830  */
1831 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1832 {
1833 	uint16_t sport;
1834 	uint16_t dport;
1835 
1836 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1837 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1838 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1839 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1840 					sizeof(uint16_t));
1841 
1842 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1843 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1844 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1845 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1846 		return true;
1847 	else
1848 		return false;
1849 }
1850 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1851 
1852 /**
1853  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1854  * @data: Pointer to IPV4 packet data buffer
1855  *
1856  * This func. checks whether it is a IPV4 multicast packet or not.
1857  *
1858  * Return: TRUE if it is a IPV4 multicast packet
1859  *         FALSE if not
1860  */
1861 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1862 {
1863 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1864 		uint32_t *dst_addr =
1865 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1866 
1867 		/*
1868 		 * Check the top nibble of the IPv4 destination address;
1869 		 * 0xE (224.0.0.0/4) indicates a multicast address.
1870 		 */
1871 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1872 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1873 			return true;
1874 		else
1875 			return false;
1876 	} else
1877 		return false;
1878 }
1879 
1880 /**
1881  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1882  * @data: Pointer to IPV6 packet data buffer
1883  *
1884  * This func. checks whether it is a IPV6 multicast packet or not.
1885  *
1886  * Return: TRUE if it is a IPV6 multicast packet
1887  *         FALSE if not
1888  */
1889 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1890 {
1891 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1892 		uint16_t *dst_addr;
1893 
1894 		dst_addr = (uint16_t *)
1895 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1896 
1897 		/*
1898 		 * Check the first 16 bits of the IPv6 destination address;
1899 		 * 0xFF00 indicates an IPv6 multicast packet.
1900 		 */
1901 		if (*dst_addr ==
1902 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1903 			return true;
1904 		else
1905 			return false;
1906 	} else
1907 		return false;
1908 }
1909 
1910 /**
1911  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1912  * @data: Pointer to IPV4 ICMP packet data buffer
1913  *
1914  * This func. checks whether it is a ICMP packet or not.
1915  *
1916  * Return: TRUE if it is a ICMP packet
1917  *         FALSE if not
1918  */
1919 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1920 {
1921 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1922 		uint8_t pkt_type;
1923 
1924 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1925 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1926 
1927 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1928 			return true;
1929 		else
1930 			return false;
1931 	} else
1932 		return false;
1933 }
1934 
1935 /**
1936  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1937  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1938  *
1939  * This func. checks whether it is a ICMPV6 packet or not.
1940  *
1941  * Return: TRUE if it is a ICMPV6 packet
1942  *         FALSE if not
1943  */
1944 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1945 {
1946 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1947 		uint8_t pkt_type;
1948 
1949 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1950 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1951 
1952 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1953 			return true;
1954 		else
1955 			return false;
1956 	} else
1957 		return false;
1958 }
1959 
1960 /**
1961  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1962  * @data: Pointer to IPV4 UDP packet data buffer
1963  *
1964  * This func. checks whether it is a IPV4 UDP packet or not.
1965  *
1966  * Return: TRUE if it is a IPV4 UDP packet
1967  *         FALSE if not
1968  */
1969 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1970 {
1971 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1972 		uint8_t pkt_type;
1973 
1974 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1975 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1976 
1977 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1978 			return true;
1979 		else
1980 			return false;
1981 	} else
1982 		return false;
1983 }
1984 
1985 /**
1986  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1987  * @data: Pointer to IPV4 TCP packet data buffer
1988  *
1989  * This func. checks whether it is a IPV4 TCP packet or not.
1990  *
1991  * Return: TRUE if it is a IPV4 TCP packet
1992  *         FALSE if not
1993  */
1994 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1995 {
1996 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1997 		uint8_t pkt_type;
1998 
1999 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2000 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2001 
2002 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2003 			return true;
2004 		else
2005 			return false;
2006 	} else
2007 		return false;
2008 }
2009 
2010 /**
2011  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2012  * @data: Pointer to IPV6 UDP packet data buffer
2013  *
2014  * This function checks whether it is an IPV6 UDP packet or not.
2015  *
2016  * Return: TRUE if it is an IPV6 UDP packet
2017  *         FALSE if not
2018  */
2019 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2020 {
2021 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2022 		uint8_t pkt_type;
2023 
2024 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2025 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2026 
2027 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2028 			return true;
2029 		else
2030 			return false;
2031 	} else
2032 		return false;
2033 }
2034 
2035 /**
2036  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2037  * @data: Pointer to IPV6 TCP packet data buffer
2038  *
2039  * This function checks whether it is an IPV6 TCP packet or not.
2040  *
2041  * Return: TRUE if it is an IPV6 TCP packet
2042  *         FALSE if not
2043  */
2044 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2045 {
2046 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2047 		uint8_t pkt_type;
2048 
2049 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2050 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2051 
2052 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2053 			return true;
2054 		else
2055 			return false;
2056 	} else
2057 		return false;
2058 }
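
/*
 * Illustrative sketch (not taken from this driver): the classifiers above
 * all operate on the start of an Ethernet frame, so a caller can chain
 * them to bucket traffic. The helper name and queue identifiers below are
 * hypothetical.
 *
 *	static int tx_queue_for_frame(uint8_t *data)
 *	{
 *		if (__qdf_nbuf_data_is_icmp_pkt(data) ||
 *		    __qdf_nbuf_data_is_icmpv6_pkt(data))
 *			return TX_QUEUE_HI_PRIO;	// hypothetical queue id
 *		if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data) ||
 *		    __qdf_nbuf_data_is_ipv6_tcp_pkt(data))
 *			return TX_QUEUE_TCP;		// hypothetical queue id
 *		return TX_QUEUE_BEST_EFFORT;		// hypothetical queue id
 *	}
 */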
2059 
2060 /**
2061  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2062  * @nbuf: sk buff
2063  *
2064  * Return: true if packet is broadcast
2065  *	   false otherwise
2066  */
2067 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2068 {
2069 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2070 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2071 }
2072 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2073 
2074 #ifdef NBUF_MEMORY_DEBUG
2075 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2076 
2077 /**
2078  * struct qdf_nbuf_track_t - Network buffer track structure
2079  *
2080  * @p_next: Pointer to next
2081  * @net_buf: Pointer to network buffer
2082  * @file_name: File name
2083  * @line_num: Line number
2084  * @size: Size
2085  */
2086 struct qdf_nbuf_track_t {
2087 	struct qdf_nbuf_track_t *p_next;
2088 	qdf_nbuf_t net_buf;
2089 	uint8_t *file_name;
2090 	uint32_t line_num;
2091 	size_t size;
2092 };
2093 
2094 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2095 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2096 
2097 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2098 static struct kmem_cache *nbuf_tracking_cache;
2099 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2100 static spinlock_t qdf_net_buf_track_free_list_lock;
2101 static uint32_t qdf_net_buf_track_free_list_count;
2102 static uint32_t qdf_net_buf_track_used_list_count;
2103 static uint32_t qdf_net_buf_track_max_used;
2104 static uint32_t qdf_net_buf_track_max_free;
2105 static uint32_t qdf_net_buf_track_max_allocated;
2106 
2107 /**
2108  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2109  *
2110  * tracks the max number of network buffers that the wlan driver was tracking
2111  * at any one time.
2112  *
2113  * Return: none
2114  */
2115 static inline void update_max_used(void)
2116 {
2117 	int sum;
2118 
2119 	if (qdf_net_buf_track_max_used <
2120 	    qdf_net_buf_track_used_list_count)
2121 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2122 	sum = qdf_net_buf_track_free_list_count +
2123 		qdf_net_buf_track_used_list_count;
2124 	if (qdf_net_buf_track_max_allocated < sum)
2125 		qdf_net_buf_track_max_allocated = sum;
2126 }
2127 
2128 /**
2129  * update_max_free() - update qdf_net_buf_track_max_free
2130  *
2131  * Tracks the max number of tracking buffers kept in the freelist.
2132  *
2133  * Return: none
2134  */
2135 static inline void update_max_free(void)
2136 {
2137 	if (qdf_net_buf_track_max_free <
2138 	    qdf_net_buf_track_free_list_count)
2139 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2140 }
2141 
2142 /**
2143  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2144  *
2145  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2146  * This function also adds flexibility to adjust the allocation and freelist
2147  * schemes.
2148  *
2149  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2150  */
2151 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2152 {
2153 	int flags = GFP_KERNEL;
2154 	unsigned long irq_flag;
2155 	QDF_NBUF_TRACK *new_node = NULL;
2156 
2157 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2158 	qdf_net_buf_track_used_list_count++;
2159 	if (qdf_net_buf_track_free_list != NULL) {
2160 		new_node = qdf_net_buf_track_free_list;
2161 		qdf_net_buf_track_free_list =
2162 			qdf_net_buf_track_free_list->p_next;
2163 		qdf_net_buf_track_free_list_count--;
2164 	}
2165 	update_max_used();
2166 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2167 
2168 	if (new_node != NULL)
2169 		return new_node;
2170 
2171 	if (in_interrupt() || irqs_disabled() || in_atomic())
2172 		flags = GFP_ATOMIC;
2173 
2174 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2175 }
2176 
2177 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2178 #define FREEQ_POOLSIZE 2048
2179 
2180 /**
2181  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2182  *
2183  * Matches calls to qdf_nbuf_track_alloc.
2184  * Either frees the tracking cookie to kernel or an internal
2185  * freelist based on the size of the freelist.
2186  *
2187  * Return: none
2188  */
2189 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2190 {
2191 	unsigned long irq_flag;
2192 
2193 	if (!node)
2194 		return;
2195 
2196 	/* Only shrink the freelist when free_list_count exceeds FREEQ_POOLSIZE
2197 	 * and the freelist is bigger than twice the number of nbufs in use.
2198 	 * If the driver is stalling in a consistently bursty fashion, this
2199 	 * keeps 3/4 of the allocations coming from the free list while also
2200 	 * allowing the system to recover memory as less frantic traffic
2201 	 * occurs.
2202 	 */
2203 
2204 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2205 
2206 	qdf_net_buf_track_used_list_count--;
2207 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2208 	   (qdf_net_buf_track_free_list_count >
2209 	    qdf_net_buf_track_used_list_count << 1)) {
2210 		kmem_cache_free(nbuf_tracking_cache, node);
2211 	} else {
2212 		node->p_next = qdf_net_buf_track_free_list;
2213 		qdf_net_buf_track_free_list = node;
2214 		qdf_net_buf_track_free_list_count++;
2215 	}
2216 	update_max_free();
2217 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2218 }
2219 
2220 /**
2221  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2222  *
2223  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2224  * the freelist first makes it performant for the first iperf udp burst
2225  * as well as steady state.
2226  *
2227  * Return: None
2228  */
2229 static void qdf_nbuf_track_prefill(void)
2230 {
2231 	int i;
2232 	QDF_NBUF_TRACK *node, *head;
2233 
2234 	/* prepopulate the freelist */
2235 	head = NULL;
2236 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2237 		node = qdf_nbuf_track_alloc();
2238 		if (node == NULL)
2239 			continue;
2240 		node->p_next = head;
2241 		head = node;
2242 	}
2243 	while (head) {
2244 		node = head->p_next;
2245 		qdf_nbuf_track_free(head);
2246 		head = node;
2247 	}
2248 
2249 	/* prefilled buffers should not count as used */
2250 	qdf_net_buf_track_max_used = 0;
2251 }
2252 
2253 /**
2254  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2255  *
2256  * This initializes the memory manager for the nbuf tracking cookies.  Because
2257  * these cookies are all the same size and only used in this feature, we can
2258  * use a kmem_cache to provide tracking as well as to speed up allocations.
2259  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2260  * features) a freelist is prepopulated here.
2261  *
2262  * Return: None
2263  */
2264 static void qdf_nbuf_track_memory_manager_create(void)
2265 {
2266 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2267 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2268 						sizeof(QDF_NBUF_TRACK),
2269 						0, 0, NULL);
2270 
2271 	qdf_nbuf_track_prefill();
2272 }
2273 
2274 /**
2275  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2276  *
2277  * Empty the freelist and print out usage statistics when it is no longer
2278  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2279  * any nbuf tracking cookies were leaked.
2280  *
2281  * Return: None
2282  */
2283 static void qdf_nbuf_track_memory_manager_destroy(void)
2284 {
2285 	QDF_NBUF_TRACK *node, *tmp;
2286 	unsigned long irq_flag;
2287 
2288 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2289 	node = qdf_net_buf_track_free_list;
2290 
2291 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2292 		qdf_print("%s: unexpectedly large max_used count %d",
2293 			  __func__, qdf_net_buf_track_max_used);
2294 
2295 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2296 		qdf_print("%s: %d unused trackers were allocated",
2297 			  __func__,
2298 			  qdf_net_buf_track_max_allocated -
2299 			  qdf_net_buf_track_max_used);
2300 
2301 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2302 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2303 		qdf_print("%s: check freelist shrinking functionality",
2304 			  __func__);
2305 
2306 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2307 		  "%s: %d residual freelist size\n",
2308 		  __func__, qdf_net_buf_track_free_list_count);
2309 
2310 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2311 		  "%s: %d max freelist size observed\n",
2312 		  __func__, qdf_net_buf_track_max_free);
2313 
2314 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2315 		  "%s: %d max buffers used observed\n",
2316 		  __func__, qdf_net_buf_track_max_used);
2317 
2318 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2319 		  "%s: %d max buffers allocated observed\n",
2320 		  __func__, qdf_net_buf_track_max_allocated);
2321 
2322 	while (node) {
2323 		tmp = node;
2324 		node = node->p_next;
2325 		kmem_cache_free(nbuf_tracking_cache, tmp);
2326 		qdf_net_buf_track_free_list_count--;
2327 	}
2328 
2329 	if (qdf_net_buf_track_free_list_count != 0)
2330 		qdf_print("%s: %d unfreed tracking memory lost in freelist\n",
2331 			  __func__, qdf_net_buf_track_free_list_count);
2332 
2333 	if (qdf_net_buf_track_used_list_count != 0)
2334 		qdf_print("%s: %d unfreed tracking memory still in use\n",
2335 			  __func__, qdf_net_buf_track_used_list_count);
2336 
2337 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2338 	kmem_cache_destroy(nbuf_tracking_cache);
2339 	qdf_net_buf_track_free_list = NULL;
2340 }
2341 
2342 /**
2343  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2344  *
2345  * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
2346  * driver in a hash table and, when the driver is unloaded, reports any leaked
2347  * SKBs. WLAN driver modules whose allocated SKBs are freed by the network
2348  * stack are supposed to call qdf_net_buf_debug_release_skb() so that the SKB
2349  * is not reported as a memory leak.
2350  *
2351  * Return: none
2352  */
2353 void qdf_net_buf_debug_init(void)
2354 {
2355 	uint32_t i;
2356 
2357 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2358 
2359 	qdf_nbuf_map_tracking_init();
2360 	qdf_nbuf_track_memory_manager_create();
2361 
2362 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2363 		gp_qdf_net_buf_track_tbl[i] = NULL;
2364 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2365 	}
2366 }
2367 qdf_export_symbol(qdf_net_buf_debug_init);
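
/*
 * Illustrative lifecycle sketch (assumed call sites, not verbatim from the
 * driver): the tracking table lives from driver load to unload, every debug
 * allocation adds a node, and SKBs handed off to the network stack are
 * released from the table first so they are not reported as leaks.
 *
 *	qdf_net_buf_debug_init();                   // at driver load
 *	nbuf = qdf_nbuf_alloc(osdev, len, 0, 4, 0); // tracked in debug builds
 *	...
 *	qdf_net_buf_debug_release_skb(nbuf);        // before netif_rx(nbuf)
 *	...
 *	qdf_net_buf_debug_exit();                   // at driver unload
 */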
2368 
2369 /**
2370  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2371  *
2372  * Exit the network buffer tracking debug functionality and log SKB memory
2373  * leaks. As part of exiting the functionality, free the leaked memory and
2374  * clean up the tracking buffers.
2375  *
2376  * Return: none
2377  */
2378 void qdf_net_buf_debug_exit(void)
2379 {
2380 	uint32_t i;
2381 	uint32_t count = 0;
2382 	unsigned long irq_flag;
2383 	QDF_NBUF_TRACK *p_node;
2384 	QDF_NBUF_TRACK *p_prev;
2385 
2386 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2387 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2388 		p_node = gp_qdf_net_buf_track_tbl[i];
2389 		while (p_node) {
2390 			p_prev = p_node;
2391 			p_node = p_node->p_next;
2392 			count++;
2393 			qdf_print("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK\n",
2394 				  p_prev->file_name, p_prev->line_num,
2395 				  p_prev->size, p_prev->net_buf);
2396 			qdf_nbuf_track_free(p_prev);
2397 		}
2398 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2399 	}
2400 
2401 	qdf_nbuf_track_memory_manager_destroy();
2402 	qdf_nbuf_map_tracking_deinit();
2403 
2404 #ifdef CONFIG_HALT_KMEMLEAK
2405 	if (count) {
2406 		qdf_print("%d SKBs leaked .. please fix the SKB leak", count);
2407 		QDF_BUG(0);
2408 	}
2409 #endif
2410 }
2411 qdf_export_symbol(qdf_net_buf_debug_exit);
2412 
2413 /**
2414  * qdf_net_buf_debug_hash() - hash network buffer pointer
2415  *
2416  * Return: hash value
2417  */
2418 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2419 {
2420 	uint32_t i;
2421 
2422 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2423 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2424 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2425 
2426 	return i;
2427 }
2428 
2429 /**
2430  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2431  *
2432  * Return: If skb is found in hash table then return pointer to network buffer
2433  *	else return %NULL
2434  */
2435 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2436 {
2437 	uint32_t i;
2438 	QDF_NBUF_TRACK *p_node;
2439 
2440 	i = qdf_net_buf_debug_hash(net_buf);
2441 	p_node = gp_qdf_net_buf_track_tbl[i];
2442 
2443 	while (p_node) {
2444 		if (p_node->net_buf == net_buf)
2445 			return p_node;
2446 		p_node = p_node->p_next;
2447 	}
2448 
2449 	return NULL;
2450 }
2451 
2452 /**
2453  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2454  *
2455  * Return: none
2456  */
2457 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2458 				uint8_t *file_name, uint32_t line_num)
2459 {
2460 	uint32_t i;
2461 	unsigned long irq_flag;
2462 	QDF_NBUF_TRACK *p_node;
2463 	QDF_NBUF_TRACK *new_node;
2464 
2465 	new_node = qdf_nbuf_track_alloc();
2466 
2467 	i = qdf_net_buf_debug_hash(net_buf);
2468 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2469 
2470 	p_node = qdf_net_buf_debug_look_up(net_buf);
2471 
2472 	if (p_node) {
2473 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2474 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2475 			  net_buf, file_name, line_num);
2476 		qdf_nbuf_track_free(new_node);
2477 	} else {
2478 		p_node = new_node;
2479 		if (p_node) {
2480 			p_node->net_buf = net_buf;
2481 			p_node->file_name = file_name;
2482 			p_node->line_num = line_num;
2483 			p_node->size = size;
2484 			qdf_mem_skb_inc(size);
2485 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2486 			gp_qdf_net_buf_track_tbl[i] = p_node;
2487 		} else
2488 			qdf_print(
2489 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2490 				  file_name, line_num, size);
2491 	}
2492 
2493 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2494 }
2495 qdf_export_symbol(qdf_net_buf_debug_add_node);
2496 
2497 /**
2498  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2499  *
2500  * Return: none
2501  */
2502 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2503 {
2504 	uint32_t i;
2505 	QDF_NBUF_TRACK *p_head;
2506 	QDF_NBUF_TRACK *p_node = NULL;
2507 	unsigned long irq_flag;
2508 	QDF_NBUF_TRACK *p_prev;
2509 
2510 	i = qdf_net_buf_debug_hash(net_buf);
2511 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2512 
2513 	p_head = gp_qdf_net_buf_track_tbl[i];
2514 
2515 	/* Unallocated SKB */
2516 	if (!p_head)
2517 		goto done;
2518 
2519 	p_node = p_head;
2520 	/* Found at head of the table */
2521 	if (p_head->net_buf == net_buf) {
2522 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2523 		goto done;
2524 	}
2525 
2526 	/* Search in collision list */
2527 	while (p_node) {
2528 		p_prev = p_node;
2529 		p_node = p_node->p_next;
2530 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2531 			p_prev->p_next = p_node->p_next;
2532 			break;
2533 		}
2534 	}
2535 
2536 done:
2537 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2538 
2539 	if (p_node) {
2540 		qdf_mem_skb_dec(p_node->size);
2541 		qdf_nbuf_track_free(p_node);
2542 	} else {
2543 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2544 			  net_buf);
2545 		QDF_BUG(0);
2546 	}
2547 }
2548 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2549 
2550 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2551 			uint8_t *file_name, uint32_t line_num)
2552 {
2553 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2554 
2555 	while (ext_list) {
2556 		/*
2557 		 * Take care to add each segment if it is a jumbo packet
2558 		 * connected using frag_list
2559 		 */
2560 		qdf_nbuf_t next;
2561 
2562 		next = qdf_nbuf_queue_next(ext_list);
2563 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2564 		ext_list = next;
2565 	}
2566 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2567 }
2568 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2569 
2570 /**
2571  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2572  * @net_buf: Network buf holding head segment (single)
2573  *
2574  * WLAN driver modules whose allocated SKBs are freed by the network stack
2575  * are supposed to call this API before handing the SKB to the network stack
2576  * so that the SKB is not reported as a memory leak.
2577  *
2578  * Return: none
2579  */
2580 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2581 {
2582 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2583 
2584 	while (ext_list) {
2585 		/*
2586 		 * Take care to free each segment if it is a jumbo packet
2587 		 * connected using frag_list
2588 		 */
2589 		qdf_nbuf_t next;
2590 
2591 		next = qdf_nbuf_queue_next(ext_list);
2592 
2593 		if (qdf_nbuf_is_tso(ext_list) &&
2594 			qdf_nbuf_get_users(ext_list) > 1) {
2595 			ext_list = next;
2596 			continue;
2597 		}
2598 
2599 		qdf_net_buf_debug_delete_node(ext_list);
2600 		ext_list = next;
2601 	}
2602 
2603 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2604 		return;
2605 
2606 	qdf_net_buf_debug_delete_node(net_buf);
2607 }
2608 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2609 
2610 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2611 				int reserve, int align, int prio,
2612 				uint8_t *file, uint32_t line)
2613 {
2614 	qdf_nbuf_t nbuf;
2615 
2616 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio);
2617 
2618 	/* Store SKB in internal QDF tracking table */
2619 	if (qdf_likely(nbuf)) {
2620 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2621 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2622 	}
2623 
2624 	return nbuf;
2625 }
2626 qdf_export_symbol(qdf_nbuf_alloc_debug);
2627 
2628 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2629 {
2630 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2631 		goto free_buf;
2632 
2633 	/* Remove SKB from internal QDF tracking table */
2634 	if (qdf_likely(nbuf)) {
2635 		qdf_net_buf_debug_delete_node(nbuf);
2636 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2637 	}
2638 
2639 free_buf:
2640 	__qdf_nbuf_free(nbuf);
2641 }
2642 qdf_export_symbol(qdf_nbuf_free_debug);
2643 
2644 #endif /* NBUF_MEMORY_DEBUG */
2645 
2646 #if defined(FEATURE_TSO)
2647 
2648 /**
2649  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2650  *
2651  * @ethproto: ethernet type of the msdu
2652  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2653  * @l2_len: L2 length for the msdu
2654  * @eit_hdr: pointer to EIT header
2655  * @eit_hdr_len: EIT header length for the msdu
2656  * @eit_hdr_dma_map_addr: dma addr for EIT header
2657  * @tcphdr: pointer to tcp header
2658  * @ipv4_csum_en: ipv4 checksum enable
2659  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2660  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2661  * @ip_id: IP id
2662  * @tcp_seq_num: TCP sequence number
2663  *
2664  * This structure holds the TSO common info that is common
2665  * across all the TCP segments of the jumbo packet.
2666  */
2667 struct qdf_tso_cmn_seg_info_t {
2668 	uint16_t ethproto;
2669 	uint16_t ip_tcp_hdr_len;
2670 	uint16_t l2_len;
2671 	uint8_t *eit_hdr;
2672 	uint32_t eit_hdr_len;
2673 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2674 	struct tcphdr *tcphdr;
2675 	uint16_t ipv4_csum_en;
2676 	uint16_t tcp_ipv4_csum_en;
2677 	uint16_t tcp_ipv6_csum_en;
2678 	uint16_t ip_id;
2679 	uint32_t tcp_seq_num;
2680 };
2681 
2682 /**
2683  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2684  * information
2685  * @osdev: qdf device handle
2686  * @skb: skb buffer
2687  * @tso_info: Parameters common to all segments
2688  *
2689  * Get the TSO information that is common across all the TCP
2690  * segments of the jumbo packet
2691  *
2692  * Return: 0 - success 1 - failure
2693  */
2694 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2695 			struct sk_buff *skb,
2696 			struct qdf_tso_cmn_seg_info_t *tso_info)
2697 {
2698 	/* Get ethernet type and ethernet header length */
2699 	tso_info->ethproto = vlan_get_protocol(skb);
2700 
2701 	/* Determine whether this is an IPv4 or IPv6 packet */
2702 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2703 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2704 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2705 
2706 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2707 		tso_info->ipv4_csum_en = 1;
2708 		tso_info->tcp_ipv4_csum_en = 1;
2709 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2710 			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
2711 				 ipv4_hdr->protocol);
2712 			return 1;
2713 		}
2714 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2715 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2716 		tso_info->tcp_ipv6_csum_en = 1;
2717 	} else {
2718 		qdf_print("TSO: ethertype 0x%x is not supported!\n",
2719 			 tso_info->ethproto);
2720 		return 1;
2721 	}
2722 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2723 	tso_info->tcphdr = tcp_hdr(skb);
2724 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2725 	/* get pointer to the ethernet + IP + TCP header and their length */
2726 	tso_info->eit_hdr = skb->data;
2727 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2728 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2729 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2730 							tso_info->eit_hdr,
2731 							tso_info->eit_hdr_len,
2732 							DMA_TO_DEVICE);
2733 	if (unlikely(dma_mapping_error(osdev->dev,
2734 				       tso_info->eit_hdr_dma_map_addr))) {
2735 		qdf_print("DMA mapping error!\n");
2736 		qdf_assert(0);
2737 		return 1;
2738 	}
2739 
2740 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2741 		/* include IPv4 header length for IPV4 (total length) */
2742 		tso_info->ip_tcp_hdr_len =
2743 			tso_info->eit_hdr_len - tso_info->l2_len;
2744 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2745 		/* exclude IPv6 header length for IPv6 (payload length) */
2746 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2747 	}
2748 	/*
2749 	 * The length of the payload (application layer data) is added to
2750 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2751 	 * descriptor.
2752 	 */
2753 
2754 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2755 		tso_info->tcp_seq_num,
2756 		tso_info->eit_hdr_len,
2757 		tso_info->l2_len,
2758 		skb->len);
2759 	return 0;
2760 }
2761 
2762 
2763 /**
2764  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
2765  *
2766  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
2767  *
2768  * Return: N/A
2769  */
2770 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
2771 				      uint32_t *lo, uint32_t *hi)
2772 {
2773 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
2774 		*lo = lower_32_bits(dmaaddr);
2775 		*hi = upper_32_bits(dmaaddr);
2776 	} else {
2777 		*lo = dmaaddr;
2778 		*hi = 0;
2779 	}
2780 }
2781 qdf_export_symbol(__qdf_dmaaddr_to_32s);
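
/*
 * Illustrative use (sketch; the descriptor-programming context is assumed):
 * hardware descriptors often take a 64-bit DMA address as two 32-bit words,
 * which is exactly the split this helper performs.
 *
 *	uint32_t addr_lo, addr_hi;
 *
 *	__qdf_dmaaddr_to_32s(tso_info->eit_hdr_dma_map_addr,
 *			     &addr_lo, &addr_hi);
 *	// addr_lo/addr_hi can now be written into the two 32-bit
 *	// address fields of a descriptor
 */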
2782 
2783 /**
2784  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2785  *
2786  * @curr_seg: Segment whose contents are initialized
2787  * @tso_cmn_info: Parameters common to all segments
2788  *
2789  * Return: None
2790  */
2791 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2792 				struct qdf_tso_seg_elem_t *curr_seg,
2793 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2794 {
2795 	/* Initialize the flags to 0 */
2796 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2797 
2798 	/*
2799 	 * The following fields remain the same across all segments of
2800 	 * a jumbo packet
2801 	 */
2802 	curr_seg->seg.tso_flags.tso_enable = 1;
2803 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2804 		tso_cmn_info->ipv4_csum_en;
2805 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2806 		tso_cmn_info->tcp_ipv6_csum_en;
2807 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2808 		tso_cmn_info->tcp_ipv4_csum_en;
2809 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2810 
2811 	/* The following fields change for the segments */
2812 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2813 	tso_cmn_info->ip_id++;
2814 
2815 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2816 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2817 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2818 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2819 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2820 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2821 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2822 
2823 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2824 
2825 	/*
2826 	 * First fragment for each segment always contains the ethernet,
2827 	 * IP and TCP header
2828 	 */
2829 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2830 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2831 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2832 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2833 
2834 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2835 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2836 		   tso_cmn_info->eit_hdr_len,
2837 		   curr_seg->seg.tso_flags.tcp_seq_num,
2838 		   curr_seg->seg.total_len);
2839 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2840 }
2841 
2842 /**
2843  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2844  * into segments
2845  * @skb: network buffer to be segmented
2846  * @tso_info: This is the output. The information about the
2847  *           TSO segments will be populated within this.
2848  *
2849  * This function fragments a TCP jumbo packet into smaller
2850  * segments to be transmitted by the driver. It chains the TSO
2851  * segments created into a list.
2852  *
2853  * Return: number of TSO segments
2854  */
2855 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2856 		struct qdf_tso_info_t *tso_info)
2857 {
2858 	/* common across all segments */
2859 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2860 	/* segment specific */
2861 	void *tso_frag_vaddr;
2862 	qdf_dma_addr_t tso_frag_paddr = 0;
2863 	uint32_t num_seg = 0;
2864 	struct qdf_tso_seg_elem_t *curr_seg;
2865 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2866 	struct skb_frag_struct *frag = NULL;
2867 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2868 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2869 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2870 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2871 	int j = 0; /* skb fragment index */
2872 
2873 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2874 
2875 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2876 						skb, &tso_cmn_info))) {
2877 		qdf_print("TSO: error getting common segment info\n");
2878 		return 0;
2879 	}
2880 
2881 	total_num_seg = tso_info->tso_num_seg_list;
2882 	curr_seg = tso_info->tso_seg_list;
2883 
2884 	/* length of the first chunk of data in the skb */
2885 	skb_frag_len = skb_headlen(skb);
2886 
2887 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2888 	/* update the remaining skb fragment length and TSO segment length */
2889 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2890 	skb_proc -= tso_cmn_info.eit_hdr_len;
2891 
2892 	/* get the address to the next tso fragment */
2893 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2894 	/* get the length of the next tso fragment */
2895 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2896 
2897 	if (tso_frag_len != 0) {
2898 		tso_frag_paddr = dma_map_single(osdev->dev,
2899 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2900 	}
2901 
2902 	if (unlikely(dma_mapping_error(osdev->dev,
2903 					tso_frag_paddr))) {
2904 		qdf_print("%s:%d DMA mapping error!\n", __func__, __LINE__);
2905 		qdf_assert(0);
2906 		return 0;
2907 	}
2908 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2909 		__LINE__, skb_frag_len, tso_frag_len);
2910 	num_seg = tso_info->num_segs;
2911 	tso_info->num_segs = 0;
2912 	tso_info->is_tso = 1;
2913 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2914 
2915 	while (num_seg && curr_seg) {
2916 		int i = 1; /* tso fragment index */
2917 		uint8_t more_tso_frags = 1;
2918 
2919 		curr_seg->seg.num_frags = 0;
2920 		tso_info->num_segs++;
2921 		total_num_seg->num_seg.tso_cmn_num_seg++;
2922 
2923 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2924 						 &tso_cmn_info);
2925 
2926 		if (unlikely(skb_proc == 0))
2927 			return tso_info->num_segs;
2928 
2929 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2930 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2931 		/* frag len is added to ip_len in while loop below*/
2932 
2933 		curr_seg->seg.num_frags++;
2934 
2935 		while (more_tso_frags) {
2936 			if (tso_frag_len != 0) {
2937 				curr_seg->seg.tso_frags[i].vaddr =
2938 					tso_frag_vaddr;
2939 				curr_seg->seg.tso_frags[i].length =
2940 					tso_frag_len;
2941 				curr_seg->seg.total_len += tso_frag_len;
2942 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2943 				curr_seg->seg.num_frags++;
2944 				skb_proc = skb_proc - tso_frag_len;
2945 
2946 				/* increment the TCP sequence number */
2947 
2948 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2949 				curr_seg->seg.tso_frags[i].paddr =
2950 					tso_frag_paddr;
2951 			}
2952 
2953 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2954 					__func__, __LINE__,
2955 					i,
2956 					tso_frag_len,
2957 					curr_seg->seg.total_len,
2958 					curr_seg->seg.tso_frags[i].vaddr);
2959 
2960 			/* if there is no more data left in the skb */
2961 			if (!skb_proc)
2962 				return tso_info->num_segs;
2963 
2964 			/* get the next payload fragment information */
2965 			/* check if there are more fragments in this segment */
2966 			if (tso_frag_len < tso_seg_size) {
2967 				more_tso_frags = 1;
2968 				if (tso_frag_len != 0) {
2969 					tso_seg_size = tso_seg_size -
2970 						tso_frag_len;
2971 					i++;
2972 					if (curr_seg->seg.num_frags ==
2973 								FRAG_NUM_MAX) {
2974 						more_tso_frags = 0;
2975 						/*
2976 						 * reset i and the tso
2977 						 * payload size
2978 						 */
2979 						i = 1;
2980 						tso_seg_size =
2981 							skb_shinfo(skb)->
2982 								gso_size;
2983 					}
2984 				}
2985 			} else {
2986 				more_tso_frags = 0;
2987 				/* reset i and the tso payload size */
2988 				i = 1;
2989 				tso_seg_size = skb_shinfo(skb)->gso_size;
2990 			}
2991 
2992 			/* if the next fragment is contiguous */
2993 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
2994 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
2995 				skb_frag_len = skb_frag_len - tso_frag_len;
2996 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2997 
2998 			} else { /* the next fragment is not contiguous */
2999 				if (skb_shinfo(skb)->nr_frags == 0) {
3000 					qdf_print("TSO: nr_frags == 0!\n");
3001 					qdf_assert(0);
3002 					return 0;
3003 				}
3004 				if (j >= skb_shinfo(skb)->nr_frags) {
3005 					qdf_print("TSO: nr_frags %d j %d\n",
3006 						  skb_shinfo(skb)->nr_frags, j);
3007 					qdf_assert(0);
3008 					return 0;
3009 				}
3010 				frag = &skb_shinfo(skb)->frags[j];
3011 				skb_frag_len = skb_frag_size(frag);
3012 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3013 				tso_frag_vaddr = skb_frag_address_safe(frag);
3014 				j++;
3015 			}
3016 
3017 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3018 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3019 				tso_seg_size);
3020 
3021 			if (!(tso_frag_vaddr)) {
3022 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3023 						__func__);
3024 				return 0;
3025 			}
3026 
3027 			tso_frag_paddr =
3028 					 dma_map_single(osdev->dev,
3029 						 tso_frag_vaddr,
3030 						 tso_frag_len,
3031 						 DMA_TO_DEVICE);
3032 			if (unlikely(dma_mapping_error(osdev->dev,
3033 							tso_frag_paddr))) {
3034 				qdf_print("%s:%d DMA mapping error!\n",
3035 						__func__, __LINE__);
3036 				qdf_assert(0);
3037 				return 0;
3038 			}
3039 		}
3040 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3041 				curr_seg->seg.tso_flags.tcp_seq_num);
3042 		num_seg--;
3043 		/* if TCP FIN flag was set, set it in the last segment */
3044 		if (!num_seg)
3045 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3046 
3047 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3048 		curr_seg = curr_seg->next;
3049 	}
3050 	return tso_info->num_segs;
3051 }
3052 qdf_export_symbol(__qdf_nbuf_get_tso_info);
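
/*
 * Illustrative TX-path sketch (assumed caller, not verbatim from the data
 * path): tso_info->num_segs is an input as well as an output here, so the
 * caller first sizes the request with __qdf_nbuf_get_tso_num_seg() and
 * attaches pre-allocated segment descriptors before calling this routine.
 *
 *	tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	// tso_info.tso_seg_list / tso_info.tso_num_seg_list are assumed to
 *	// point at descriptor pools with at least num_segs entries
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *		return QDF_STATUS_E_FAILURE;	// could not segment the skb
 */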
3053 
3054 /**
3055  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3056  *
3057  * @osdev: qdf device handle
3058  * @tso_seg: TSO segment element to be unmapped
3059  * @is_last_seg: whether this is last tso seg or not
3060  *
3061  * Return: none
3062  */
3063 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3064 			  struct qdf_tso_seg_elem_t *tso_seg,
3065 			  bool is_last_seg)
3066 {
3067 	uint32_t num_frags = 0;
3068 
3069 	if (tso_seg->seg.num_frags > 0)
3070 		num_frags = tso_seg->seg.num_frags - 1;
3071 
3072 	/*Num of frags in a tso seg cannot be less than 2 */
3073 	if (num_frags < 1) {
3074 		qdf_assert(0);
3075 		qdf_print("ERROR: num of frags in a tso segment is %d\n",
3076 				  (num_frags + 1));
3077 		return;
3078 	}
3079 
3080 	while (num_frags) {
3081 		/*Do dma unmap the tso seg except the 0th frag */
3082 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3083 			qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n",
3084 				  num_frags);
3085 			qdf_assert(0);
3086 			return;
3087 		}
3088 		dma_unmap_single(osdev->dev,
3089 				 tso_seg->seg.tso_frags[num_frags].paddr,
3090 				 tso_seg->seg.tso_frags[num_frags].length,
3091 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3092 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3093 		num_frags--;
3094 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3095 	}
3096 
3097 	if (is_last_seg) {
3098 		/*Do dma unmap for the tso seg 0th frag */
3099 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3100 			qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n");
3101 			qdf_assert(0);
3102 			return;
3103 		}
3104 		dma_unmap_single(osdev->dev,
3105 				 tso_seg->seg.tso_frags[0].paddr,
3106 				 tso_seg->seg.tso_frags[0].length,
3107 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3108 		tso_seg->seg.tso_frags[0].paddr = 0;
3109 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3110 	}
3111 }
3112 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3113 
3114 /**
3115  * __qdf_nbuf_get_tso_num_seg() - calculate the number of TSO segments
3116  * for a jumbo packet
3117  * @skb: network buffer to be segmented
3118  *
3119  * This function computes how many TSO segments the given TCP jumbo
3120  * packet will be divided into, without building the segment list
3121  * itself (see __qdf_nbuf_get_tso_info() for the segment list
3122  * construction).
3123  *
3124  * Return: number of TSO segments
3125  *         0 - failure
3126  */
3127 #ifndef BUILD_X86
3128 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3129 {
3130 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3131 	uint32_t remainder, num_segs = 0;
3132 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3133 	uint8_t frags_per_tso = 0;
3134 	uint32_t skb_frag_len = 0;
3135 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3136 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3137 	struct skb_frag_struct *frag = NULL;
3138 	int j = 0;
3139 	uint32_t temp_num_seg = 0;
3140 
3141 	/* length of the first chunk of data in the skb minus eit header*/
3142 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3143 
3144 	/* Calculate num of segs for skb's first chunk of data*/
3145 	remainder = skb_frag_len % tso_seg_size;
3146 	num_segs = skb_frag_len / tso_seg_size;
3147 	/**
3148 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3149 	 * In that case, one more tso seg is required to accommodate
3150 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3151 	 * then remaining data will be accommodated while doing the calculation
3152 	 * for nr_frags data. Hence, frags_per_tso++.
3153 	 */
3154 	if (remainder) {
3155 		if (!skb_nr_frags)
3156 			num_segs++;
3157 		else
3158 			frags_per_tso++;
3159 	}
3160 
3161 	while (skb_nr_frags) {
3162 		if (j >= skb_shinfo(skb)->nr_frags) {
3163 			qdf_print("TSO: nr_frags %d j %d\n",
3164 			skb_shinfo(skb)->nr_frags, j);
3165 			qdf_assert(0);
3166 			return 0;
3167 		}
3168 		/**
3169 		 * Calculate the number of tso seg for nr_frags data:
3170 		 * Get the length of each frag in skb_frag_len, add to
3171 		 * remainder.Get the number of segments by dividing it to
3172 		 * tso_seg_size and calculate the new remainder.
3173 		 * Decrement the nr_frags value and keep
3174 		 * looping all the skb_fragments.
3175 		 */
3176 		frag = &skb_shinfo(skb)->frags[j];
3177 		skb_frag_len = skb_frag_size(frag);
3178 		temp_num_seg = num_segs;
3179 		remainder += skb_frag_len;
3180 		num_segs += remainder / tso_seg_size;
3181 		remainder = remainder % tso_seg_size;
3182 		skb_nr_frags--;
3183 		if (remainder) {
3184 			if (num_segs > temp_num_seg)
3185 				frags_per_tso = 0;
3186 			/**
3187 			 * increment the tso per frags whenever remainder is
3188 			 * positive. If frags_per_tso reaches the (max-1),
3189 			 * [First frags always have EIT header, therefore max-1]
3190 			 * increment the num_segs as no more data can be
3191 			 * accommodated in the curr tso seg. Reset the remainder
3192 			 * and frags per tso and keep looping.
3193 			 */
3194 			frags_per_tso++;
3195 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3196 				num_segs++;
3197 				frags_per_tso = 0;
3198 				remainder = 0;
3199 			}
3200 			/**
3201 			 * If this is the last skb frag and still remainder is
3202 			 * non-zero(frags_per_tso is not reached to the max-1)
3203 			 * then increment the num_segs to take care of the
3204 			 * remaining length.
3205 			 */
3206 			if (!skb_nr_frags && remainder) {
3207 				num_segs++;
3208 				frags_per_tso = 0;
3209 			}
3210 		} else {
3211 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3212 			frags_per_tso = 0;
3213 		}
3214 		j++;
3215 	}
3216 
3217 	return num_segs;
3218 }
3219 #else
3220 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3221 {
3222 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3223 	struct skb_frag_struct *frag = NULL;
3224 
3225 	/*
3226 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3227 	 * region which cannot be accessed by Target
3228 	 */
3229 	if (virt_to_phys(skb->data) < 0x50000040) {
3230 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3231 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3232 				virt_to_phys(skb->data));
3233 		goto fail;
3234 
3235 	}
3236 
3237 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3238 		frag = &skb_shinfo(skb)->frags[i];
3239 
3240 		if (!frag)
3241 			goto fail;
3242 
3243 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3244 			goto fail;
3245 	}
3246 
3247 
3248 	gso_size = skb_shinfo(skb)->gso_size;
3249 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3250 			+ tcp_hdrlen(skb));
3251 	while (tmp_len) {
3252 		num_segs++;
3253 		if (tmp_len > gso_size)
3254 			tmp_len -= gso_size;
3255 		else
3256 			break;
3257 	}
3258 
3259 	return num_segs;
3260 
3261 	/*
3262 	 * Do not free this frame, just do socket level accounting
3263 	 * so that this is not reused.
3264 	 */
3265 fail:
3266 	if (skb->sk)
3267 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3268 
3269 	return 0;
3270 }
3271 #endif
3272 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
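
/*
 * Worked example for the non-BUILD_X86 path (numbers are illustrative):
 * with gso_size = 1400, a linear region of 3000 bytes after the EIT header
 * and one 2000-byte page fragment, the linear data gives 3000 / 1400 = 2
 * segments with a 200-byte remainder; the fragment walk then gives
 * (200 + 2000) / 1400 = 1 more segment with an 800-byte remainder, and
 * since that was the last fragment the remainder costs one final segment,
 * so __qdf_nbuf_get_tso_num_seg() returns 4.
 */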
3273 
3274 #endif /* FEATURE_TSO */
3275 
3276 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3277 {
3278 	qdf_nbuf_users_inc(&skb->users);
3279 	return skb;
3280 }
3281 qdf_export_symbol(__qdf_nbuf_inc_users);
3282 
3283 int __qdf_nbuf_get_users(struct sk_buff *skb)
3284 {
3285 	return qdf_nbuf_users_read(&skb->users);
3286 }
3287 qdf_export_symbol(__qdf_nbuf_get_users);
3288 
3289 /**
3290  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3291  * @skb: sk_buff handle
3292  *
3293  * Return: none
3294  */
3295 
3296 void __qdf_nbuf_ref(struct sk_buff *skb)
3297 {
3298 	skb_get(skb);
3299 }
3300 qdf_export_symbol(__qdf_nbuf_ref);
3301 
3302 /**
3303  * __qdf_nbuf_shared() - Check whether the buffer is shared
3304  *  @skb: sk_buff buffer
3305  *
3306  *  Return: true if more than one person has a reference to this buffer.
3307  */
3308 int __qdf_nbuf_shared(struct sk_buff *skb)
3309 {
3310 	return skb_shared(skb);
3311 }
3312 qdf_export_symbol(__qdf_nbuf_shared);
3313 
3314 /**
3315  * __qdf_nbuf_dmamap_create() - create a DMA map.
3316  * @osdev: qdf device handle
3317  * @dmap: dma map handle
3318  *
3319  * This can later be used to map networking buffers. They:
3320  * - need space in adf_drv's software descriptor
3321  * - are typically created during adf_drv_create
3322  * - need to be created before any API (qdf_nbuf_map) that uses them
3323  *
3324  * Return: QDF STATUS
3325  */
3326 QDF_STATUS
3327 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3328 {
3329 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3330 	/*
3331 	 * The driver can advertise its SG capability; it must be handled.
3332 	 * Use bounce buffers if they are there.
3333 	 */
3334 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3335 	if (!(*dmap))
3336 		error = QDF_STATUS_E_NOMEM;
3337 
3338 	return error;
3339 }
3340 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3341 /**
3342  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3343  * @osdev: qdf device handle
3344  * @dmap: dma map handle
3345  *
3346  * Return: none
3347  */
3348 void
3349 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3350 {
3351 	kfree(dmap);
3352 }
3353 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
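
/*
 * Illustrative pairing (sketch; the attach/detach hooks are assumed): a map
 * is created once, typically at driver attach, and destroyed when the
 * driver detaches.
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;	// at attach
 *	...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);	// at detach
 */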
3354 
3355 /**
3356  * __qdf_nbuf_map_nbytes_single() - map nbytes
3357  * @osdev: os device
3358  * @buf: buffer
3359  * @dir: direction
3360  * @nbytes: number of bytes
3361  *
3362  * Return: QDF_STATUS
3363  */
3364 #ifdef A_SIMOS_DEVHOST
3365 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3366 		qdf_device_t osdev, struct sk_buff *buf,
3367 		 qdf_dma_dir_t dir, int nbytes)
3368 {
3369 	qdf_dma_addr_t paddr;
3370 
3371 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3372 	return QDF_STATUS_SUCCESS;
3373 }
3374 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3375 #else
3376 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3377 		qdf_device_t osdev, struct sk_buff *buf,
3378 		 qdf_dma_dir_t dir, int nbytes)
3379 {
3380 	qdf_dma_addr_t paddr;
3381 
3382 	/* assume that the OS only provides a single fragment */
3383 	QDF_NBUF_CB_PADDR(buf) = paddr =
3384 		dma_map_single(osdev->dev, buf->data,
3385 			nbytes, __qdf_dma_dir_to_os(dir));
3386 	return dma_mapping_error(osdev->dev, paddr) ?
3387 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3388 }
3389 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3390 #endif
3391 /**
3392  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3393  * @osdev: os device
3394  * @buf: buffer
3395  * @dir: direction
3396  * @nbytes: number of bytes
3397  *
3398  * Return: none
3399  */
3400 #if defined(A_SIMOS_DEVHOST)
3401 void
3402 __qdf_nbuf_unmap_nbytes_single(
3403 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3404 {
3405 }
3406 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3407 
3408 #else
3409 void
3410 __qdf_nbuf_unmap_nbytes_single(
3411 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3412 {
3413 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3414 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3415 		return;
3416 	}
3417 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3418 			nbytes, __qdf_dma_dir_to_os(dir));
3419 }
3420 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3421 #endif
3422 /**
3423  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3424  * @osdev: os device
3425  * @skb: skb handle
3426  * @dir: dma direction
3427  * @nbytes: number of bytes to be mapped
3428  *
3429  * Return: QDF_STATUS
3430  */
3431 #ifdef QDF_OS_DEBUG
3432 QDF_STATUS
3433 __qdf_nbuf_map_nbytes(
3434 	qdf_device_t osdev,
3435 	struct sk_buff *skb,
3436 	qdf_dma_dir_t dir,
3437 	int nbytes)
3438 {
3439 	struct skb_shared_info  *sh = skb_shinfo(skb);
3440 
3441 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3442 
3443 	/*
3444 	 * Assume there's only a single fragment.
3445 	 * To support multiple fragments, it would be necessary to change
3446 	 * adf_nbuf_t to be a separate object that stores meta-info
3447 	 * (including the bus address for each fragment) and a pointer
3448 	 * to the underlying sk_buff.
3449 	 */
3450 	qdf_assert(sh->nr_frags == 0);
3451 
3452 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3453 }
3454 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3455 #else
3456 QDF_STATUS
3457 __qdf_nbuf_map_nbytes(
3458 	qdf_device_t osdev,
3459 	struct sk_buff *skb,
3460 	qdf_dma_dir_t dir,
3461 	int nbytes)
3462 {
3463 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3464 }
3465 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3466 #endif
3467 /**
3468  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3469  * @osdev: OS device
3470  * @skb: skb handle
3471  * @dir: direction
3472  * @nbytes: number of bytes
3473  *
3474  * Return: none
3475  */
3476 void
3477 __qdf_nbuf_unmap_nbytes(
3478 	qdf_device_t osdev,
3479 	struct sk_buff *skb,
3480 	qdf_dma_dir_t dir,
3481 	int nbytes)
3482 {
3483 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3484 
3485 	/*
3486 	 * Assume there's a single fragment.
3487 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3488 	 */
3489 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3490 }
3491 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
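
/*
 * Illustrative map/unmap pairing (sketch; the TX-completion context is
 * assumed): a single-fragment skb is mapped before its physical address is
 * handed to the target and unmapped with the same length and direction once
 * the transmit completes.
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	// ... program QDF_NBUF_CB_PADDR(skb) into the TX descriptor ...
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */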
3492 
3493 /**
3494  * __qdf_nbuf_dma_map_info() - return the dma map info
3495  * @bmap: dma map
3496  * @sg: dma map info
3497  *
3498  * Return: none
3499  */
3500 void
3501 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3502 {
3503 	qdf_assert(bmap->mapped);
3504 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3505 
3506 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3507 			sizeof(struct __qdf_segment));
3508 	sg->nsegs = bmap->nsegs;
3509 }
3510 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3511 /**
3512  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3513  *			specified by the index
3514  * @skb: sk buff
3515  * @sg: scatter/gather list of all the frags
3516  *
3517  * Return: none
3518  */
3519 #if defined(__QDF_SUPPORT_FRAG_MEM)
3520 void
3521 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3522 {
3523 	struct skb_shared_info *sh = skb_shinfo(skb);
3524 	uint32_t i;
3525 
3526 	qdf_assert(skb != NULL);
3527 	sg->sg_segs[0].vaddr = skb->data;
3528 	sg->sg_segs[0].len   = skb->len;
3529 	sg->nsegs            = 1;
3530 
3531 	for (i = 1; i <= sh->nr_frags; i++) {
3532 		skb_frag_t    *f        = &sh->frags[i - 1];
3533 
3534 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3535 			f->page_offset);
3536 		sg->sg_segs[i].len      = f->size;
3537 		qdf_assert(i < QDF_MAX_SGLIST);
3538 	}
3539 	/* linear data plus all the page fragments */
3540 	sg->nsegs += sh->nr_frags;
3538 
3539 }
3540 qdf_export_symbol(__qdf_nbuf_frag_info);
3541 #else
3542 #ifdef QDF_OS_DEBUG
3543 void
3544 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3545 {
3546 
3547 	struct skb_shared_info  *sh = skb_shinfo(skb);
3548 
3549 	qdf_assert(skb != NULL);
3550 	sg->sg_segs[0].vaddr = skb->data;
3551 	sg->sg_segs[0].len   = skb->len;
3552 	sg->nsegs            = 1;
3553 
3554 	qdf_assert(sh->nr_frags == 0);
3555 }
3556 qdf_export_symbol(__qdf_nbuf_frag_info);
3557 #else
3558 void
3559 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3560 {
3561 	sg->sg_segs[0].vaddr = skb->data;
3562 	sg->sg_segs[0].len   = skb->len;
3563 	sg->nsegs            = 1;
3564 }
3565 qdf_export_symbol(__qdf_nbuf_frag_info);
3566 #endif
3567 #endif
3568 /**
3569  * __qdf_nbuf_get_frag_size() - get frag size
3570  * @nbuf: sk buffer
3571  * @cur_frag: current frag
3572  *
3573  * Return: frag size
3574  */
3575 uint32_t
3576 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3577 {
3578 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3579 	const skb_frag_t *frag = sh->frags + cur_frag;
3580 
3581 	return skb_frag_size(frag);
3582 }
3583 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3584 
3585 /**
3586  * __qdf_nbuf_frag_map() - dma map frag
3587  * @osdev: os device
3588  * @nbuf: sk buff
3589  * @offset: offset
3590  * @dir: direction
3591  * @cur_frag: current fragment
3592  *
3593  * Return: QDF status
3594  */
3595 #ifdef A_SIMOS_DEVHOST
3596 QDF_STATUS __qdf_nbuf_frag_map(
3597 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3598 	int offset, qdf_dma_dir_t dir, int cur_frag)
3599 {
3600 	int32_t paddr, frag_len;
3601 
3602 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3603 	return QDF_STATUS_SUCCESS;
3604 }
3605 qdf_export_symbol(__qdf_nbuf_frag_map);
3606 #else
3607 QDF_STATUS __qdf_nbuf_frag_map(
3608 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3609 	int offset, qdf_dma_dir_t dir, int cur_frag)
3610 {
3611 	dma_addr_t paddr, frag_len;
3612 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3613 	const skb_frag_t *frag = sh->frags + cur_frag;
3614 
3615 	frag_len = skb_frag_size(frag);
3616 
3617 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3618 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3619 					__qdf_dma_dir_to_os(dir));
3620 	return dma_mapping_error(osdev->dev, paddr) ?
3621 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3622 }
3623 qdf_export_symbol(__qdf_nbuf_frag_map);
3624 #endif
3625 /**
3626  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3627  * @dmap: dma map
3628  * @cb: callback
3629  * @arg: argument
3630  *
3631  * Return: none
3632  */
3633 void
3634 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3635 {
3636 	return;
3637 }
3638 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3639 
3640 
3641 /**
3642  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3643  * @osdev: os device
3644  * @buf: sk buff
3645  * @dir: direction
3646  *
3647  * Return: none
3648  */
3649 #if defined(A_SIMOS_DEVHOST)
3650 static void __qdf_nbuf_sync_single_for_cpu(
3651 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3652 {
3653 	return;
3654 }
3655 #else
3656 static void __qdf_nbuf_sync_single_for_cpu(
3657 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3658 {
3659 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3660 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3661 		return;
3662 	}
3663 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3664 		skb_end_offset(buf) - skb_headroom(buf),
3665 		__qdf_dma_dir_to_os(dir));
3666 }
3667 #endif
3668 /**
3669  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3670  * @osdev: os device
3671  * @skb: sk buff
3672  * @dir: direction
3673  *
3674  * Return: none
3675  */
3676 void
3677 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3678 	struct sk_buff *skb, qdf_dma_dir_t dir)
3679 {
3680 	qdf_assert(
3681 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3682 
3683 	/*
3684 	 * Assume there's a single fragment.
3685 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3686 	 */
3687 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3688 }
3689 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
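
/*
 * Illustrative use (sketch; the RX context is assumed): after the device
 * has DMA-ed a frame into a mapped RX buffer, the buffer is synced for the
 * CPU before the header is parsed.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	// skb->data now reflects what the device wrote
 */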
3690 
3691 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3692 /**
3693  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3694  * @rx_status: Pointer to rx_status.
3695  * @rtap_buf: Buf to which VHT info has to be updated.
3696  * @rtap_len: Current length of radiotap buffer
3697  *
3698  * Return: Length of radiotap after VHT flags updated.
3699  */
3700 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3701 					struct mon_rx_status *rx_status,
3702 					int8_t *rtap_buf,
3703 					uint32_t rtap_len)
3704 {
3705 	uint16_t vht_flags = 0;
3706 
3707 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3708 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3709 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3710 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3711 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3712 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3713 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3714 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3715 	rtap_len += 2;
3716 
3717 	rtap_buf[rtap_len] |=
3718 		(rx_status->is_stbc ?
3719 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3720 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3721 		(rx_status->ldpc ?
3722 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3723 		(rx_status->beamformed ?
3724 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3725 	rtap_len += 1;
3726 	switch (rx_status->vht_flag_values2) {
3727 	case IEEE80211_RADIOTAP_VHT_BW_20:
3728 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3729 		break;
3730 	case IEEE80211_RADIOTAP_VHT_BW_40:
3731 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3732 		break;
3733 	case IEEE80211_RADIOTAP_VHT_BW_80:
3734 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3735 		break;
3736 	case IEEE80211_RADIOTAP_VHT_BW_160:
3737 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3738 		break;
3739 	}
3740 	rtap_len += 1;
3741 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3742 	rtap_len += 1;
3743 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3744 	rtap_len += 1;
3745 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3746 	rtap_len += 1;
3747 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3748 	rtap_len += 1;
3749 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3750 	rtap_len += 1;
3751 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3752 	rtap_len += 1;
3753 	put_unaligned_le16(rx_status->vht_flag_values6,
3754 			   &rtap_buf[rtap_len]);
3755 	rtap_len += 2;
3756 
3757 	return rtap_len;
3758 }
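
/*
 * Size check (derived from the layout comment above, "u16, u8, u8, u8[4],
 * u8, u8, u16"): 2 + 1 + 1 + 4 + 1 + 1 + 2 = 12 bytes are appended here,
 * matching RADIOTAP_VHT_FLAGS_LEN defined below.
 */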
3759 
3760 /**
3761  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3762  * @rx_status: Pointer to rx_status.
3763  * @rtap_buf: buffer to which radiotap has to be updated
3764  * @rtap_len: radiotap length
3765  *
3766  * This API updates high-efficiency (11ax) fields in the radiotap header
3767  *
3768  * Return: length of rtap_len updated.
3769  */
3770 static unsigned int
3771 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3772 				     int8_t *rtap_buf, uint32_t rtap_len)
3773 {
3774 	/*
3775 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3776 	 * Enable all "known" HE radiotap flags for now
3777 	 */
3778 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3779 	rtap_len += 2;
3780 
3781 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3782 	rtap_len += 2;
3783 
3784 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3785 	rtap_len += 2;
3786 
3787 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3788 	rtap_len += 2;
3789 
3790 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3791 	rtap_len += 2;
3792 
3793 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3794 	rtap_len += 2;
3795 
3796 	return rtap_len;
3797 }
3798 
3799 
3800 /**
3801  * qdf_nbuf_update_radiotap_he_mu_flags() - Update radiotap header HE-MU flags
3802  * @rx_status: Pointer to rx_status.
3803  * @rtap_buf: Buffer to which HE-MU info has to be updated
3804  * @rtap_len: Current length of radiotap buffer
3805  *
3806  * This API updates the HE-MU fields in the radiotap header.
3807  *
3808  * Return: Length of radiotap after HE-MU flags updated.
3809  */
3810 static unsigned int
3811 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3812 				     int8_t *rtap_buf, uint32_t rtap_len)
3813 {
3814 	/*
3815 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3816 	 * Enable all "known" he-mu radiotap flags for now
3817 	 */
3818 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3819 	rtap_len += 2;
3820 
3821 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3822 	rtap_len += 2;
3823 
3824 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3825 	rtap_len += 1;
3826 
3827 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3828 	rtap_len += 1;
3829 
3830 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3831 	rtap_len += 1;
3832 
3833 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3834 	rtap_len += 1;
3835 
3836 	return rtap_len;
3837 }
3838 
3839 /**
3840  * qdf_nbuf_update_radiotap_he_mu_other_flags() - Update HE-MU-OTHER flags
3841  * @rx_status: Pointer to rx_status.
3842  * @rtap_buf: Buffer to which HE-MU-OTHER info has to be updated
3843  * @rtap_len: Current length of radiotap buffer
3844  *
3845  * This API updates the HE-MU-OTHER fields in the radiotap header.
3846  *
3847  * Return: Length of radiotap after HE-MU-OTHER flags updated.
3848  */
3849 static unsigned int
3850 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3851 				     int8_t *rtap_buf, uint32_t rtap_len)
3852 {
3853 	/*
3854 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3855 	 * Enable all "known" he-mu-other radiotap flags for now
3856 	 */
3857 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3858 	rtap_len += 2;
3859 
3860 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3861 	rtap_len += 2;
3862 
3863 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3864 	rtap_len += 1;
3865 
3866 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3867 	rtap_len += 1;
3868 
3869 	return rtap_len;
3870 }
3871 
3872 #define NORMALIZED_TO_NOISE_FLOOR (-96)
3873 
3874 /* This is the combined length reserved for the radiotap header
3875  * (mandatory struct ieee80211_radiotap_header plus all optional element
3876  * lengths below); it cannot be more than the available headroom_sz.
3877  * Increase this when more radiotap elements are added.
3878  */
3879 
3880 #define RADIOTAP_VHT_FLAGS_LEN 12
3881 #define RADIOTAP_HE_FLAGS_LEN 12
3882 #define RADIOTAP_HE_MU_FLAGS_LEN 8
3883 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN 18
3884 #define RADIOTAP_FIXED_HEADER_LEN 16
3885 #define RADIOTAP_HT_FLAGS_LEN 3
3886 #define RADIOTAP_AMPDU_STATUS_LEN 8
3887 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3888 				RADIOTAP_FIXED_HEADER_LEN + \
3889 				RADIOTAP_HT_FLAGS_LEN + \
3890 				RADIOTAP_VHT_FLAGS_LEN + \
3891 				RADIOTAP_AMPDU_STATUS_LEN + \
3892 				RADIOTAP_HE_FLAGS_LEN + \
3893 				RADIOTAP_HE_MU_FLAGS_LEN + \
3894 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN)
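
/*
 * Worked total, assuming the usual 8-byte struct ieee80211_radiotap_header
 * (it_version, it_pad, it_len, it_present):
 *
 *	RADIOTAP_HEADER_LEN = 8 + 16 + 3 + 12 + 8 + 12 + 8 + 18 = 85 bytes
 *
 * so a fully populated radiotap header needs at least that much headroom.
 */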
3895 
3896 #define IEEE80211_RADIOTAP_HE 23
3897 #define IEEE80211_RADIOTAP_HE_MU	24
3898 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3899 
3900 /**
3901  * radiotap_num_to_freq() - Get frequency from channel number
3902  * @chan_num: Input channel number
3903  *
3904  * Return: Channel frequency in MHz
3905  */
3906 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3907 {
3908 	if (chan_num == CHANNEL_NUM_14)
3909 		return CHANNEL_FREQ_2484;
3910 	if (chan_num < CHANNEL_NUM_14)
3911 		return CHANNEL_FREQ_2407 +
3912 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3913 
3914 	if (chan_num < CHANNEL_NUM_27)
3915 		return CHANNEL_FREQ_2512 +
3916 			((chan_num - CHANNEL_NUM_15) *
3917 			 FREQ_MULTIPLIER_CONST_20MHZ);
3918 
3919 	if (chan_num > CHANNEL_NUM_182 &&
3920 			chan_num < CHANNEL_NUM_197)
3921 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3922 			CHANNEL_FREQ_4000);
3923 
3924 	return CHANNEL_FREQ_5000 +
3925 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3926 }
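
/*
 * Illustrative conversions under the mapping above (not driver code):
 *
 *	channel 1   -> 2407 + 1 * 5   = 2412 MHz
 *	channel 14  -> 2484 MHz (special-cased)
 *	channel 36  -> 5000 + 36 * 5  = 5180 MHz
 *	channel 184 -> 4000 + 184 * 5 = 4920 MHz (4.9 GHz band)
 *
 * Channels 15-26 are mapped with 20 MHz spacing starting at 2512 MHz.
 */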
3927 
3928 /**
3929  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
3930  * @rx_status: Pointer to rx_status.
3931  * @rtap_buf: Buf to which AMPDU info has to be updated.
3932  * @rtap_len: Current length of radiotap buffer
3933  *
3934  * Return: Length of radiotap after AMPDU flags updated.
3935  */
3936 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
3937 					struct mon_rx_status *rx_status,
3938 					uint8_t *rtap_buf,
3939 					uint32_t rtap_len)
3940 {
3941 	/*
3942 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
3943 	 * First 32 bits of AMPDU represents the reference number
3944 	 */
3945 
3946 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
3947 	uint16_t ampdu_flags = 0;
3948 	uint16_t ampdu_reserved_flags = 0;
3949 
3950 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
3951 	rtap_len += 4;
3952 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
3953 	rtap_len += 2;
3954 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
3955 	rtap_len += 2;
3956 
3957 	return rtap_len;
3958 }
3959 
3960 /**
3961  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
3962  * @rx_status: Pointer to rx_status.
3963  * @nbuf:      nbuf pointer to which radiotap has to be updated
3964  * @headroom_sz: Available headroom size.
3965  *
3966  * Return: Length of radiotap header added, or 0 if headroom is too small.
3967  */
3968 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
3969 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
3970 {
3971 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
3972 	struct ieee80211_radiotap_header *rthdr =
3973 		(struct ieee80211_radiotap_header *)rtap_buf;
3974 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
3975 	uint32_t rtap_len = rtap_hdr_len;
3976 
3977 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
3978 	rthdr->it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
3979 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
3980 	rtap_len += 8;
3981 
3982 	/* IEEE80211_RADIOTAP_FLAGS u8 */
3983 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS);
3984 
3985 	if (rx_status->rs_fcs_err)
3986 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
3987 
3988 	rtap_buf[rtap_len] = rx_status->rtap_flags;
3989 	rtap_len += 1;
3990 
3991 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
3992 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
3993 	    !rx_status->he_flags) {
3994 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
3995 		rtap_buf[rtap_len] = rx_status->rate;
3996 	} else
3997 		rtap_buf[rtap_len] = 0;
3998 	rtap_len += 1;
3999 
4000 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4001 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
4002 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4003 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4004 	rtap_len += 2;
4005 	/* Channel flags. */
4006 	if (rx_status->chan_num > CHANNEL_NUM_35)
4007 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4008 	else
4009 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4010 	if (rx_status->cck_flag)
4011 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4012 	if (rx_status->ofdm_flag)
4013 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4014 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4015 	rtap_len += 2;
4016 
4017 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4018 	 *					(dBm)
4019 	 */
4020 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4021 	/*
4022 	 * rssi_comb is int dB, need to convert it to dBm.
4023 	 * normalize value to noise floor of -96 dBm
4024 	 */
4025 	rtap_buf[rtap_len] = rx_status->rssi_comb +
4026 		NORMALIZED_TO_NOISE_FLOOR;
4027 	rtap_len += 1;
4028 
4029 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4030 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA);
4031 	rtap_buf[rtap_len] = rx_status->nr_ant;
4032 	rtap_len += 1;
4033 
4034 	if (rx_status->ht_flags) {
4035 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4036 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
4037 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4038 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4039 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4040 		rtap_len += 1;
4041 
4042 		if (rx_status->sgi)
4043 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4044 		if (rx_status->bw)
4045 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4046 		else
4047 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4048 		rtap_len += 1;
4049 
4050 		rtap_buf[rtap_len] = rx_status->mcs;
4051 		rtap_len += 1;
4052 	}
4053 
4054 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4055 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4056 		rthdr->it_present |=
4057 			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4058 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4059 								rtap_buf,
4060 								rtap_len);
4061 	}
4062 
4063 	if (rx_status->vht_flags) {
4064 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4065 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
4066 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4067 							      rtap_buf,
4068 							      rtap_len);
4069 	}
4070 
4071 	if (rx_status->he_flags) {
4072 		/* IEEE80211_RADIOTAP_HE */
4073 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
4074 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4075 								rtap_buf,
4076 								rtap_len);
4077 	}
4078 
4079 	if (rx_status->he_mu_flags) {
4080 		/* IEEE80211_RADIOTAP_HE-MU */
4081 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
4082 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4083 								rtap_buf,
4084 								rtap_len);
4085 	}
4086 
4087 	if (rx_status->he_mu_other_flags) {
4088 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4089 		rthdr->it_present |=
4090 			cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4091 		rtap_len =
4092 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4093 								rtap_buf,
4094 								rtap_len);
4095 	}
4096 
4097 	rthdr->it_len = cpu_to_le16(rtap_len);
4098 
4099 	if (headroom_sz < rtap_len) {
4100 		qdf_print("ERROR: not enough space to update radiotap\n");
4101 		return 0;
4102 	}
4103 	qdf_nbuf_push_head(nbuf, rtap_len);
4104 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4105 	return rtap_len;
4106 }
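
/*
 * Illustrative sketch, not part of the driver: a monitor-mode rx path would
 * typically fill a struct mon_rx_status from the hardware descriptors, make
 * sure the nbuf has enough headroom, and then prepend the radiotap header
 * before handing the frame up.  example_deliver_to_mon_interface() is a
 * hypothetical helper; qdf_nbuf_headroom() is assumed to be the usual
 * headroom accessor.
 *
 *	static void example_mon_rx(qdf_nbuf_t nbuf, struct mon_rx_status *rs)
 *	{
 *		if (!qdf_nbuf_update_radiotap(rs, nbuf,
 *					      qdf_nbuf_headroom(nbuf)))
 *			return;
 *
 *		example_deliver_to_mon_interface(nbuf);
 *	}
 *
 * A return value of 0 means the available headroom was smaller than the
 * radiotap header, and the nbuf is left untouched.
 */
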
4107 #else
4108 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4109 					struct mon_rx_status *rx_status,
4110 					int8_t *rtap_buf,
4111 					uint32_t rtap_len)
4112 {
4113 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4114 	return 0;
4115 }
4116 
4117 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4118 				      int8_t *rtap_buf, uint32_t rtap_len)
4119 {
4120 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4121 	return 0;
4122 }
4123 
4124 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4125 					struct mon_rx_status *rx_status,
4126 					uint8_t *rtap_buf,
4127 					uint32_t rtap_len)
4128 {
4129 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4130 	return 0;
4131 }
4132 
4133 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4134 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4135 {
4136 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4137 	return 0;
4138 }
4139 #endif
4140 qdf_export_symbol(qdf_nbuf_update_radiotap);
4141 
4142 /**
4143  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4144  * @cb_func_ptr: function pointer to the nbuf free callback
4145  *
4146  * This function registers a callback function for nbuf free.
4147  *
4148  * Return: none
4149  */
4150 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4151 {
4152 	nbuf_free_cb = cb_func_ptr;
4153 }
4154 
4155 /**
4156  * qdf_nbuf_classify_pkt() - tag skb with bcast/mcast and packet type
4157  * @skb: sk buff
4158  *
4159  * Return: none
4160  */
4161 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4162 {
4163 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4164 
4165 	/* check destination mac address is broadcast/multicast */
4166 	if (is_broadcast_ether_addr((uint8_t *)eh))
4167 		QDF_NBUF_CB_SET_BCAST(skb);
4168 	else if (is_multicast_ether_addr((uint8_t *)eh))
4169 		QDF_NBUF_CB_SET_MCAST(skb);
4170 
4171 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4172 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4173 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4174 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4175 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4176 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4177 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4178 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4179 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4180 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4181 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4182 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4183 }
4184 qdf_export_symbol(qdf_nbuf_classify_pkt);
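
/*
 * Illustrative sketch, not part of the driver: a tx path could classify the
 * frame as soon as it arrives from the network stack and then branch on the
 * recorded packet type, e.g. to prioritise EAPOL frames.  The helpers
 * example_queue_high_prio() and example_queue_data() are hypothetical.
 *
 *	static void example_tx_enqueue(struct sk_buff *skb)
 *	{
 *		qdf_nbuf_classify_pkt(skb);
 *
 *		if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *		    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *			example_queue_high_prio(skb);
 *		else
 *			example_queue_data(skb);
 *	}
 */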
4185 
4186 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4187 {
4188 	qdf_nbuf_users_set(&nbuf->users, 1);
4189 	nbuf->data = nbuf->head + NET_SKB_PAD;
4190 	skb_reset_tail_pointer(nbuf);
4191 }
4192 qdf_export_symbol(__qdf_nbuf_init);
4193 
4194 #ifdef WLAN_FEATURE_FASTPATH
4195 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4196 {
4197 	qdf_nbuf_users_set(&nbuf->users, 1);
4198 	nbuf->data = nbuf->head + NET_SKB_PAD;
4199 	skb_reset_tail_pointer(nbuf);
4200 }
4201 qdf_export_symbol(qdf_nbuf_init_fast);
4202 #endif /* WLAN_FEATURE_FASTPATH */
4203 
4204 
4205 #ifdef QDF_NBUF_GLOBAL_COUNT
4206 #ifdef WLAN_DEBUGFS
4207 /**
4208  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4209  *
4210  * Return: void
4211  */
4212 void __qdf_nbuf_mod_init(void)
4213 {
4214 	qdf_atomic_init(&nbuf_count);
4215 	qdf_debugfs_init();
4216 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4217 }
4218 
4219 /**
4220  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4221  *
4222  * Return: void
4223  */
4224 void __qdf_nbuf_mod_exit(void)
4225 {
4226 	qdf_debugfs_exit();
4227 }
4228 
4229 #else
4230 
4231 void __qdf_nbuf_mod_init(void)
4232 {
4233 	qdf_atomic_init(&nbuf_count);
4234 }
4235 
4236 void __qdf_nbuf_mod_exit(void)
4237 {
4238 }
4239 
4240 #endif
4241 #endif
4242