xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: qdf_nbuf.c
30  * QCA driver framework (QDF) network buffer management APIs
31  */
32 
33 #include <linux/hashtable.h>
34 #include <linux/kernel.h>
35 #include <linux/version.h>
36 #include <linux/skbuff.h>
37 #include <linux/module.h>
38 #include <linux/proc_fs.h>
39 #include <qdf_atomic.h>
40 #include <qdf_types.h>
41 #include <qdf_nbuf.h>
42 #include <qdf_mem.h>
43 #include <qdf_status.h>
44 #include <qdf_lock.h>
45 #include <qdf_trace.h>
46 #include <qdf_debugfs.h>
47 #include <net/ieee80211_radiotap.h>
48 #include <qdf_module.h>
49 #include <qdf_atomic.h>
50 #include <pld_common.h>
51 
52 #if defined(FEATURE_TSO)
53 #include <net/ipv6.h>
54 #include <linux/ipv6.h>
55 #include <linux/tcp.h>
56 #include <linux/if_vlan.h>
57 #include <linux/ip.h>
58 #endif /* FEATURE_TSO */
59 
60 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
61 
62 #define qdf_nbuf_users_inc atomic_inc
63 #define qdf_nbuf_users_dec atomic_dec
64 #define qdf_nbuf_users_set atomic_set
65 #define qdf_nbuf_users_read atomic_read
66 #else
67 #define qdf_nbuf_users_inc refcount_inc
68 #define qdf_nbuf_users_dec refcount_dec
69 #define qdf_nbuf_users_set refcount_set
70 #define qdf_nbuf_users_read refcount_read
71 #endif /* KERNEL_VERSION(4, 13, 0) */
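
/*
 * Illustrative sketch (not from the original source): these wrappers let the
 * rest of this file manipulate skb->users uniformly, since skb->users is an
 * atomic_t before kernel 4.13 and a refcount_t from 4.13 onwards, e.g.:
 *
 *	qdf_nbuf_users_set(&skb->users, 1);
 *	qdf_nbuf_users_inc(&skb->users);
 *	if (qdf_nbuf_users_read(&skb->users) == 2)
 *		qdf_nbuf_users_dec(&skb->users);
 */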
72 
73 #define IEEE80211_RADIOTAP_VHT_BW_20	0
74 #define IEEE80211_RADIOTAP_VHT_BW_40	1
75 #define IEEE80211_RADIOTAP_VHT_BW_80	2
76 #define IEEE80211_RADIOTAP_VHT_BW_160	3
77 
78 #define RADIOTAP_VHT_BW_20	0
79 #define RADIOTAP_VHT_BW_40	1
80 #define RADIOTAP_VHT_BW_80	4
81 #define RADIOTAP_VHT_BW_160	11
82 
83 /* channel number to freq conversion */
84 #define CHANNEL_NUM_14 14
85 #define CHANNEL_NUM_15 15
86 #define CHANNEL_NUM_27 27
87 #define CHANNEL_NUM_35 35
88 #define CHANNEL_NUM_182 182
89 #define CHANNEL_NUM_197 197
90 #define CHANNEL_FREQ_2484 2484
91 #define CHANNEL_FREQ_2407 2407
92 #define CHANNEL_FREQ_2512 2512
93 #define CHANNEL_FREQ_5000 5000
94 #define CHANNEL_FREQ_4000 4000
95 #define FREQ_MULTIPLIER_CONST_5MHZ 5
96 #define FREQ_MULTIPLIER_CONST_20MHZ 20
97 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
98 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
99 #define RADIOTAP_CCK_CHANNEL 0x0020
100 #define RADIOTAP_OFDM_CHANNEL 0x0040
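
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * channel constants above support the usual IEEE 802.11 channel-number to
 * centre-frequency conversion, e.g. for the 2.4 GHz band:
 *
 *	if (chan == CHANNEL_NUM_14)
 *		freq = CHANNEL_FREQ_2484;
 *	else if (chan < CHANNEL_NUM_14)
 *		freq = CHANNEL_FREQ_2407 + chan * FREQ_MULTIPLIER_CONST_5MHZ;
 */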
101 
102 #ifdef CONFIG_MCL
103 #include <qdf_mc_timer.h>
104 
105 struct qdf_track_timer {
106 	qdf_mc_timer_t track_timer;
107 	qdf_atomic_t alloc_fail_cnt;
108 };
109 
110 static struct qdf_track_timer alloc_track_timer;
111 
112 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
113 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
114 #endif
115 
116 /* Packet Counter */
117 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
118 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
119 #ifdef QDF_NBUF_GLOBAL_COUNT
120 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
121 static qdf_atomic_t nbuf_count;
122 #endif
123 
124 /**
125  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
126  *
127  * Return: none
128  */
129 void qdf_nbuf_tx_desc_count_display(void)
130 {
131 	qdf_print("Current Snapshot of the Driver:\n");
132 	qdf_print("Data Packets:\n");
133 	qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
134 		nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
135 		(nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
136 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
137 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
138 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
139 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
140 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
141 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
142 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
143 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
144 	qdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
145 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
146 			nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
147 		nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
148 			 nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
149 		nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
150 			 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
151 		nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
152 	qdf_print("Mgmt Packets:\n");
153 	qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
154 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
155 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
156 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
157 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
158 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
159 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
160 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
161 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
162 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
163 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
164 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
165 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
166 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
167 }
168 EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_display);
169 
170 /**
171  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
172  * @packet_type   : packet type either mgmt/data
173  * @current_state : layer at which the packet currently present
174  *
175  * Return: none
176  */
177 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
178 			uint8_t current_state)
179 {
180 	switch (packet_type) {
181 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
182 		nbuf_tx_mgmt[current_state]++;
183 		break;
184 	case QDF_NBUF_TX_PKT_DATA_TRACK:
185 		nbuf_tx_data[current_state]++;
186 		break;
187 	default:
188 		break;
189 	}
190 }
191 EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_update);
192 
193 /**
194  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
195  *
196  * Return: none
197  */
198 void qdf_nbuf_tx_desc_count_clear(void)
199 {
200 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
201 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
202 }
203 EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_clear);
204 
205 /**
206  * qdf_nbuf_set_state() - Updates the packet state
207  * @nbuf:            network buffer
208  * @current_state :  layer at which the packet currently is
209  *
210  * This function updates the packet state to the layer at which the packet
211  * currently is
212  *
213  * Return: none
214  */
215 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
216 {
217 	/*
218 	 * Only Mgmt, Data Packets are tracked. WMI messages
219 	 * such as scan commands are not tracked
220 	 */
221 	uint8_t packet_type;
222 
223 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
224 
225 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
226 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
227 		return;
228 	}
229 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
230 	qdf_nbuf_tx_desc_count_update(packet_type,
231 					current_state);
232 }
233 EXPORT_SYMBOL(qdf_nbuf_set_state);
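
/*
 * Illustrative example (assumption, not taken from a real caller): a layer
 * that hands a tracked data frame to HTC would record the transition with:
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);
 *
 * Buffers that are neither data- nor mgmt-tracked are silently ignored.
 */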
234 
235 #ifdef CONFIG_MCL
236 /**
237  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
238  *
239  * This function starts the alloc fail replenish timer.
240  *
241  * Return: void
242  */
243 static void __qdf_nbuf_start_replenish_timer(void)
244 {
245 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
246 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
247 	    QDF_TIMER_STATE_RUNNING)
248 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
249 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
250 }
251 
252 /**
253  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
254  *
255  * This function stops the alloc fail replenish timer.
256  *
257  * Return: void
258  */
259 static void __qdf_nbuf_stop_replenish_timer(void)
260 {
261 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
262 		return;
263 
264 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
265 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
266 	    QDF_TIMER_STATE_RUNNING)
267 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
268 }
269 
270 /**
271  * qdf_replenish_expire_handler - Replenish expire handler
272  *
273  * This function triggers when the alloc fail replenish timer expires.
274  *
275  * Return: void
276  */
277 static void qdf_replenish_expire_handler(void *arg)
278 {
279 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
280 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
281 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
282 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
283 
284 		/* Error handling here */
285 	}
286 }
287 
288 /**
289  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
290  *
291  * This function initializes the nbuf alloc fail replenish timer.
292  *
293  * Return: void
294  */
295 void __qdf_nbuf_init_replenish_timer(void)
296 {
297 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
298 			  qdf_replenish_expire_handler, NULL);
299 }
300 
301 /**
302  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
303  *
304  * This function deinitializes the nbuf alloc fail replenish timer.
305  *
306  * Return: void
307  */
308 void __qdf_nbuf_deinit_replenish_timer(void)
309 {
310 	__qdf_nbuf_stop_replenish_timer();
311 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
312 }
313 #else
314 
315 static inline void __qdf_nbuf_start_replenish_timer(void) {}
316 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
317 #endif
318 
319 /* globals do not need to be initialized to NULL/0 */
320 qdf_nbuf_trace_update_t qdf_trace_update_cb;
321 qdf_nbuf_free_t nbuf_free_cb;
322 
323 #ifdef QDF_NBUF_GLOBAL_COUNT
324 
325 /**
326  * __qdf_nbuf_count_get() - get nbuf global count
327  *
328  * Return: nbuf global count
329  */
330 int __qdf_nbuf_count_get(void)
331 {
332 	return qdf_atomic_read(&nbuf_count);
333 }
334 EXPORT_SYMBOL(__qdf_nbuf_count_get);
335 
336 /**
337  * __qdf_nbuf_count_inc() - increment nbuf global count
338  *
339  * @nbuf: sk buff
340  *
341  * Return: void
342  */
343 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
344 {
345 	qdf_atomic_inc(&nbuf_count);
346 }
347 EXPORT_SYMBOL(__qdf_nbuf_count_inc);
348 
349 /**
350  * __qdf_nbuf_count_dec() - decrement nbuf global count
351  *
352  * @nbuf: sk buff
353  *
354  * Return: void
355  */
356 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
357 {
358 	qdf_atomic_dec(&nbuf_count);
359 }
360 EXPORT_SYMBOL(__qdf_nbuf_count_dec);
361 #endif
362 
363 
364 /**
365  * __qdf_nbuf_alloc() - Allocate nbuf
366  * @osdev: Device handle
367  * @size: Netbuf requested size
368  * @reserve: headroom to start with
369  * @align: Align
370  * @prio: Priority
371  *
372  * This allocates an nbuf, aligns it if needed, and reserves some headroom at
373  * the front. Because the reserve is applied after alignment, an unaligned
374  * reserve value will leave the data pointer unaligned.
375  *
376  * Return: nbuf or %NULL if no memory
377  */
378 #if defined(QCA_WIFI_QCA8074) && defined (BUILD_X86)
379 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
380 			 int align, int prio)
381 {
382 	struct sk_buff *skb;
383 	unsigned long offset;
384 	uint32_t lowmem_alloc_tries = 0;
385 
386 	if (align)
387 		size += (align - 1);
388 
389 realloc:
390 	skb = dev_alloc_skb(size);
391 
392 	if (skb)
393 		goto skb_alloc;
394 
395 	skb = pld_nbuf_pre_alloc(size);
396 
397 	if (!skb) {
398 		pr_info("ERROR:NBUF alloc failed\n");
399 		return NULL;
400 	}
401 
402 skb_alloc:
403 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
404 	 * Though we are trying to reserve low memory upfront to prevent this,
405 	 * we sometimes see SKBs allocated from low memory.
406 	 */
407 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
408 		lowmem_alloc_tries++;
409 		if (lowmem_alloc_tries > 100) {
410 			qdf_print("%s Failed\n", __func__);
411 			return NULL;
412 		} else {
413 			/* Not freeing to make sure it
414 			 * will not get allocated again
415 			 */
416 			goto realloc;
417 		}
418 	}
419 	memset(skb->cb, 0x0, sizeof(skb->cb));
420 
421 	/*
422 	 * The default is for netbuf fragments to be interpreted
423 	 * as wordstreams rather than bytestreams.
424 	 */
425 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
426 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
427 
428 	/*
429 	 * XXX:how about we reserve first then align
430 	 * Align & make sure that the tail & data are adjusted properly
431 	 */
432 
433 	if (align) {
434 		offset = ((unsigned long)skb->data) % align;
435 		if (offset)
436 			skb_reserve(skb, align - offset);
437 	}
438 
439 	/*
440 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
441 	 * pointer
442 	 */
443 	skb_reserve(skb, reserve);
444 	qdf_nbuf_count_inc(skb);
445 
446 	return skb;
447 }
448 #else
449 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
450 			 int align, int prio)
451 {
452 	struct sk_buff *skb;
453 	unsigned long offset;
454 	int flags = GFP_KERNEL;
455 
456 	if (align)
457 		size += (align - 1);
458 
459 	if (in_interrupt() || irqs_disabled() || in_atomic())
460 		flags = GFP_ATOMIC;
461 
462 	skb = __netdev_alloc_skb(NULL, size, flags);
463 
464 	if (skb)
465 		goto skb_alloc;
466 
467 	skb = pld_nbuf_pre_alloc(size);
468 
469 	if (!skb) {
470 		pr_err_ratelimited("ERROR:NBUF alloc failed, size = %zu\n",
471 				   size);
472 		__qdf_nbuf_start_replenish_timer();
473 		return NULL;
474 	} else {
475 		__qdf_nbuf_stop_replenish_timer();
476 	}
477 
478 skb_alloc:
479 	memset(skb->cb, 0x0, sizeof(skb->cb));
480 
481 	/*
482 	 * The default is for netbuf fragments to be interpreted
483 	 * as wordstreams rather than bytestreams.
484 	 */
485 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
486 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
487 
488 	/*
489 	 * XXX:how about we reserve first then align
490 	 * Align & make sure that the tail & data are adjusted properly
491 	 */
492 
493 	if (align) {
494 		offset = ((unsigned long)skb->data) % align;
495 		if (offset)
496 			skb_reserve(skb, align - offset);
497 	}
498 
499 	/*
500 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
501 	 * pointer
502 	 */
503 	skb_reserve(skb, reserve);
504 	qdf_nbuf_count_inc(skb);
505 
506 	return skb;
507 }
508 #endif
509 EXPORT_SYMBOL(__qdf_nbuf_alloc);
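
/*
 * Illustrative usage sketch (assumption; callers normally use the
 * qdf_nbuf_alloc()/qdf_nbuf_free() wrappers rather than the __ variants):
 *
 *	skb = __qdf_nbuf_alloc(osdev, payload_len + headroom, headroom, 4, 0);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	__qdf_nbuf_free(skb);
 */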
510 
511 /**
512  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
513  * @skb: Pointer to network buffer
514  *
515  * Return: none
516  */
517 
518 #ifdef CONFIG_MCL
519 void __qdf_nbuf_free(struct sk_buff *skb)
520 {
521 	if (pld_nbuf_pre_alloc_free(skb))
522 		return;
523 
524 	qdf_nbuf_count_dec(skb);
525 	if (nbuf_free_cb)
526 		nbuf_free_cb(skb);
527 	else
528 		dev_kfree_skb_any(skb);
529 }
530 #else
531 void __qdf_nbuf_free(struct sk_buff *skb)
532 {
533 	if (pld_nbuf_pre_alloc_free(skb))
534 		return;
535 
536 	qdf_nbuf_count_dec(skb);
537 	dev_kfree_skb_any(skb);
538 }
539 #endif
540 
541 EXPORT_SYMBOL(__qdf_nbuf_free);
542 
543 #ifdef MEMORY_DEBUG
544 enum qdf_nbuf_event_type {
545 	QDF_NBUF_ALLOC,
546 	QDF_NBUF_FREE,
547 	QDF_NBUF_MAP,
548 	QDF_NBUF_UNMAP,
549 };
550 
551 struct qdf_nbuf_event {
552 	qdf_nbuf_t nbuf;
553 	const char *file;
554 	uint32_t line;
555 	enum qdf_nbuf_event_type type;
556 	uint64_t timestamp;
557 };
558 
559 #define QDF_NBUF_HISTORY_SIZE 4096
560 static qdf_atomic_t qdf_nbuf_history_index;
561 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
562 
563 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
564 {
565 	int32_t next = qdf_atomic_inc_return(index);
566 
567 	if (next == size)
568 		qdf_atomic_sub(size, index);
569 
570 	return next % size;
571 }
572 
573 static void
574 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
575 		     enum qdf_nbuf_event_type type)
576 {
577 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
578 						   QDF_NBUF_HISTORY_SIZE);
579 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
580 
581 	event->nbuf = nbuf;
582 	event->file = file;
583 	event->line = line;
584 	event->type = type;
585 	event->timestamp = qdf_get_log_timestamp();
586 }
587 
588 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
589 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
590 static qdf_spinlock_t qdf_nbuf_map_lock;
591 
592 struct qdf_nbuf_map_metadata {
593 	struct hlist_node node;
594 	qdf_nbuf_t nbuf;
595 	const char *file;
596 	uint32_t line;
597 };
598 
599 static void qdf_nbuf_map_tracking_init(void)
600 {
601 	hash_init(qdf_nbuf_map_ht);
602 	qdf_spinlock_create(&qdf_nbuf_map_lock);
603 }
604 
605 void qdf_nbuf_map_check_for_leaks(void)
606 {
607 	struct qdf_nbuf_map_metadata *meta;
608 	int bucket;
609 	uint32_t count = 0;
610 	bool is_empty;
611 
612 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
613 	is_empty = hash_empty(qdf_nbuf_map_ht);
614 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
615 
616 	if (is_empty)
617 		return;
618 
619 	qdf_err("Nbuf map without unmap events detected!");
620 	qdf_err("------------------------------------------------------------");
621 
622 	/* Hold the lock for the entire iteration for safe list/meta access. We
623 	 * are explicitly preferring the chance to watchdog on the print, over
624  * the possibility of invalid list/memory access. Since we are going to
625 	 * panic anyway, the worst case is loading up the crash dump to find out
626 	 * what was in the hash table.
627 	 */
628 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
629 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
630 		count++;
631 		qdf_err("0x%pK @ %s:%u",
632 			meta->nbuf, kbasename(meta->file), meta->line);
633 	}
634 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
635 
636 	panic("%u fatal nbuf map without unmap events detected!", count);
637 }
638 
639 static void qdf_nbuf_map_tracking_deinit(void)
640 {
641 	qdf_nbuf_map_check_for_leaks();
642 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
643 }
644 
645 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
646 {
647 	struct qdf_nbuf_map_metadata *meta;
648 
649 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
650 		if (meta->nbuf == nbuf)
651 			return meta;
652 	}
653 
654 	return NULL;
655 }
656 
657 static QDF_STATUS
658 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
659 {
660 	struct qdf_nbuf_map_metadata *meta;
661 
662 	QDF_BUG(nbuf);
663 	if (!nbuf) {
664 		qdf_err("Cannot map null nbuf");
665 		return QDF_STATUS_E_INVAL;
666 	}
667 
668 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
669 	meta = qdf_nbuf_meta_get(nbuf);
670 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
671 	if (meta)
672 		panic("Double nbuf map detected @ %s:%u",
673 		      kbasename(file), line);
674 
675 	meta = qdf_mem_malloc(sizeof(*meta));
676 	if (!meta) {
677 		qdf_err("Failed to allocate nbuf map tracking metadata");
678 		return QDF_STATUS_E_NOMEM;
679 	}
680 
681 	meta->nbuf = nbuf;
682 	meta->file = file;
683 	meta->line = line;
684 
685 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
686 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
687 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
688 
689 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
690 
691 	return QDF_STATUS_SUCCESS;
692 }
693 
694 static void
695 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
696 {
697 	struct qdf_nbuf_map_metadata *meta;
698 
699 	QDF_BUG(nbuf);
700 	if (!nbuf) {
701 		qdf_err("Cannot unmap null nbuf");
702 		return;
703 	}
704 
705 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
706 	meta = qdf_nbuf_meta_get(nbuf);
707 
708 	if (!meta)
709 		panic("Double nbuf unmap or unmap without map detected @%s:%u",
710 		      kbasename(file), line);
711 
712 	hash_del(&meta->node);
713 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
714 
715 	qdf_mem_free(meta);
716 
717 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
718 }
719 
720 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
721 			      qdf_nbuf_t buf,
722 			      qdf_dma_dir_t dir,
723 			      const char *file,
724 			      uint32_t line)
725 {
726 	QDF_STATUS status;
727 
728 	status = qdf_nbuf_track_map(buf, file, line);
729 	if (QDF_IS_STATUS_ERROR(status))
730 		return status;
731 
732 	status = __qdf_nbuf_map(osdev, buf, dir);
733 	if (QDF_IS_STATUS_ERROR(status))
734 		qdf_nbuf_untrack_map(buf, file, line);
735 
736 	return status;
737 }
738 
739 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
740 			  qdf_nbuf_t buf,
741 			  qdf_dma_dir_t dir,
742 			  const char *file,
743 			  uint32_t line)
744 {
745 	qdf_nbuf_untrack_map(buf, file, line);
746 	__qdf_nbuf_unmap_single(osdev, buf, dir);
747 }
748 
749 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
750 				     qdf_nbuf_t buf,
751 				     qdf_dma_dir_t dir,
752 				     const char *file,
753 				     uint32_t line)
754 {
755 	QDF_STATUS status;
756 
757 	status = qdf_nbuf_track_map(buf, file, line);
758 	if (QDF_IS_STATUS_ERROR(status))
759 		return status;
760 
761 	status = __qdf_nbuf_map_single(osdev, buf, dir);
762 	if (QDF_IS_STATUS_ERROR(status))
763 		qdf_nbuf_untrack_map(buf, file, line);
764 
765 	return status;
766 }
767 
768 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
769 				 qdf_nbuf_t buf,
770 				 qdf_dma_dir_t dir,
771 				 const char *file,
772 				 uint32_t line)
773 {
774 	qdf_nbuf_untrack_map(buf, file, line);
775 	__qdf_nbuf_unmap_single(osdev, buf, dir);
776 }
777 
778 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
779 				     qdf_nbuf_t buf,
780 				     qdf_dma_dir_t dir,
781 				     int nbytes,
782 				     const char *file,
783 				     uint32_t line)
784 {
785 	QDF_STATUS status;
786 
787 	status = qdf_nbuf_track_map(buf, file, line);
788 	if (QDF_IS_STATUS_ERROR(status))
789 		return status;
790 
791 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
792 	if (QDF_IS_STATUS_ERROR(status))
793 		qdf_nbuf_untrack_map(buf, file, line);
794 
795 	return status;
796 }
797 
798 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
799 				 qdf_nbuf_t buf,
800 				 qdf_dma_dir_t dir,
801 				 int nbytes,
802 				 const char *file,
803 				 uint32_t line)
804 {
805 	qdf_nbuf_untrack_map(buf, file, line);
806 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
807 }
808 
809 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
810 					    qdf_nbuf_t buf,
811 					    qdf_dma_dir_t dir,
812 					    int nbytes,
813 					    const char *file,
814 					    uint32_t line)
815 {
816 	QDF_STATUS status;
817 
818 	status = qdf_nbuf_track_map(buf, file, line);
819 	if (QDF_IS_STATUS_ERROR(status))
820 		return status;
821 
822 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
823 	if (QDF_IS_STATUS_ERROR(status))
824 		qdf_nbuf_untrack_map(buf, file, line);
825 
826 	return status;
827 }
828 
829 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
830 					qdf_nbuf_t buf,
831 					qdf_dma_dir_t dir,
832 					int nbytes,
833 					const char *file,
834 					uint32_t line)
835 {
836 	qdf_nbuf_untrack_map(buf, file, line);
837 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
838 }
839 #endif /* MEMORY_DEBUG */
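
/*
 * Illustrative sketch (assumption about the header-side plumbing, which is
 * not shown in this file): under MEMORY_DEBUG the qdf_nbuf_map*() and
 * qdf_nbuf_unmap*() wrappers are expected to route into the *_debug variants
 * above with the caller's __FILE__/__LINE__, so a balanced sequence is:
 *
 *	status = qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...
 *	qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 *
 * Any map that is never unmapped stays in qdf_nbuf_map_ht and is reported by
 * qdf_nbuf_map_check_for_leaks().
 */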
840 
841 /**
842  * __qdf_nbuf_map() - map a buffer to local bus address space
843  * @osdev: OS device
845  * @skb: Pointer to network buffer
846  * @dir: Direction
847  *
848  * Return: QDF_STATUS
849  */
850 #ifdef QDF_OS_DEBUG
851 QDF_STATUS
852 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
853 {
854 	struct skb_shared_info *sh = skb_shinfo(skb);
855 
856 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
857 			|| (dir == QDF_DMA_FROM_DEVICE));
858 
859 	/*
860 	 * Assume there's only a single fragment.
861 	 * To support multiple fragments, it would be necessary to change
862 	 * qdf_nbuf_t to be a separate object that stores meta-info
863 	 * (including the bus address for each fragment) and a pointer
864 	 * to the underlying sk_buff.
865 	 */
866 	qdf_assert(sh->nr_frags == 0);
867 
868 	return __qdf_nbuf_map_single(osdev, skb, dir);
869 }
870 EXPORT_SYMBOL(__qdf_nbuf_map);
871 
872 #else
873 QDF_STATUS
874 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
875 {
876 	return __qdf_nbuf_map_single(osdev, skb, dir);
877 }
878 EXPORT_SYMBOL(__qdf_nbuf_map);
879 #endif
880 /**
881  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
882  * @osdev: OS device
883  * @skb: Pointer to network buffer
884  * @dir: dma direction
885  *
886  * Return: none
887  */
888 void
889 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
890 			qdf_dma_dir_t dir)
891 {
892 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
893 		   || (dir == QDF_DMA_FROM_DEVICE));
894 
895 	/*
896 	 * Assume there's a single fragment.
897 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
898 	 */
899 	__qdf_nbuf_unmap_single(osdev, skb, dir);
900 }
901 EXPORT_SYMBOL(__qdf_nbuf_unmap);
902 
903 /**
904  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
905  * @osdev: OS device
906  * @buf: Pointer to network buffer
907  * @dir: Direction
908  *
909  * Return: QDF_STATUS
910  */
911 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
912 QDF_STATUS
913 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
914 {
915 	qdf_dma_addr_t paddr;
916 
917 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
918 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
919 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
920 	return QDF_STATUS_SUCCESS;
921 }
922 EXPORT_SYMBOL(__qdf_nbuf_map_single);
923 #else
924 QDF_STATUS
925 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
926 {
927 	qdf_dma_addr_t paddr;
928 
929 	/* assume that the OS only provides a single fragment */
930 	QDF_NBUF_CB_PADDR(buf) = paddr =
931 		dma_map_single(osdev->dev, buf->data,
932 				skb_end_pointer(buf) - buf->data,
933 				__qdf_dma_dir_to_os(dir));
934 	return dma_mapping_error(osdev->dev, paddr)
935 		? QDF_STATUS_E_FAILURE
936 		: QDF_STATUS_SUCCESS;
937 }
938 EXPORT_SYMBOL(__qdf_nbuf_map_single);
939 #endif
940 /**
941  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
942  * @osdev: OS device
943  * @buf: Pointer to network buffer
944  * @dir: Direction
945  *
946  * Return: none
947  */
948 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
949 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
950 				qdf_dma_dir_t dir)
951 {
952 }
953 #else
954 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
955 					qdf_dma_dir_t dir)
956 {
957 	if (QDF_NBUF_CB_PADDR(buf))
958 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
959 			skb_end_pointer(buf) - buf->data,
960 			__qdf_dma_dir_to_os(dir));
961 }
962 #endif
963 EXPORT_SYMBOL(__qdf_nbuf_unmap_single);
964 
965 /**
966  * __qdf_nbuf_set_rx_cksum() - set rx checksum
967  * @skb: Pointer to network buffer
968  * @cksum: Pointer to checksum value
969  *
970  * Return: QDF_STATUS
971  */
972 QDF_STATUS
973 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
974 {
975 	switch (cksum->l4_result) {
976 	case QDF_NBUF_RX_CKSUM_NONE:
977 		skb->ip_summed = CHECKSUM_NONE;
978 		break;
979 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
980 		skb->ip_summed = CHECKSUM_UNNECESSARY;
981 		break;
982 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
983 		skb->ip_summed = CHECKSUM_PARTIAL;
984 		skb->csum = cksum->val;
985 		break;
986 	default:
987 		pr_err("Unknown checksum type\n");
988 		qdf_assert(0);
989 		return QDF_STATUS_E_NOSUPPORT;
990 	}
991 	return QDF_STATUS_SUCCESS;
992 }
993 EXPORT_SYMBOL(__qdf_nbuf_set_rx_cksum);
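
/*
 * Illustrative example (assumption, not from a real caller): an rx path whose
 * hardware has already verified the TCP/UDP checksum would report it as:
 *
 *	qdf_nbuf_rx_cksum_t cksum = {
 *		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
 *	};
 *
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */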
994 
995 /**
996  * __qdf_nbuf_get_tx_cksum() - get tx checksum
997  * @skb: Pointer to network buffer
998  *
999  * Return: TX checksum value
1000  */
1001 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1002 {
1003 	switch (skb->ip_summed) {
1004 	case CHECKSUM_NONE:
1005 		return QDF_NBUF_TX_CKSUM_NONE;
1006 	case CHECKSUM_PARTIAL:
1007 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1008 	case CHECKSUM_COMPLETE:
1009 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1010 	default:
1011 		return QDF_NBUF_TX_CKSUM_NONE;
1012 	}
1013 }
1014 EXPORT_SYMBOL(__qdf_nbuf_get_tx_cksum);
1015 
1016 /**
1017  * __qdf_nbuf_get_tid() - get tid
1018  * @skb: Pointer to network buffer
1019  *
1020  * Return: tid
1021  */
1022 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1023 {
1024 	return skb->priority;
1025 }
1026 EXPORT_SYMBOL(__qdf_nbuf_get_tid);
1027 
1028 /**
1029  * __qdf_nbuf_set_tid() - set tid
1030  * @skb: Pointer to network buffer
1031  * @tid: TID value to set
1032  * Return: none
1033  */
1034 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1035 {
1036 	skb->priority = tid;
1037 }
1038 EXPORT_SYMBOL(__qdf_nbuf_set_tid);
1039 
1040 /**
1041  * __qdf_nbuf_get_exemption_type() - get exemption type
1042  * @skb: Pointer to network buffer
1043  *
1044  * Return: exemption type
1045  */
1046 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1047 {
1048 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1049 }
1050 EXPORT_SYMBOL(__qdf_nbuf_get_exemption_type);
1051 
1052 /**
1053  * __qdf_nbuf_reg_trace_cb() - register trace callback
1054  * @cb_func_ptr: Pointer to trace callback function
1055  *
1056  * Return: none
1057  */
1058 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1059 {
1060 	qdf_trace_update_cb = cb_func_ptr;
1061 }
1062 EXPORT_SYMBOL(__qdf_nbuf_reg_trace_cb);
1063 
1064 /**
1065  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1066  *              of DHCP packet.
1067  * @data: Pointer to DHCP packet data buffer
1068  *
1069  * This func. returns the subtype of DHCP packet.
1070  *
1071  * Return: subtype of the DHCP packet.
1072  */
1073 enum qdf_proto_subtype
1074 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1075 {
1076 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1077 
1078 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1079 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1080 					QDF_DHCP_OPTION53_LENGTH)) {
1081 
1082 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1083 		case QDF_DHCP_DISCOVER:
1084 			subtype = QDF_PROTO_DHCP_DISCOVER;
1085 			break;
1086 		case QDF_DHCP_REQUEST:
1087 			subtype = QDF_PROTO_DHCP_REQUEST;
1088 			break;
1089 		case QDF_DHCP_OFFER:
1090 			subtype = QDF_PROTO_DHCP_OFFER;
1091 			break;
1092 		case QDF_DHCP_ACK:
1093 			subtype = QDF_PROTO_DHCP_ACK;
1094 			break;
1095 		case QDF_DHCP_NAK:
1096 			subtype = QDF_PROTO_DHCP_NACK;
1097 			break;
1098 		case QDF_DHCP_RELEASE:
1099 			subtype = QDF_PROTO_DHCP_RELEASE;
1100 			break;
1101 		case QDF_DHCP_INFORM:
1102 			subtype = QDF_PROTO_DHCP_INFORM;
1103 			break;
1104 		case QDF_DHCP_DECLINE:
1105 			subtype = QDF_PROTO_DHCP_DECLINE;
1106 			break;
1107 		default:
1108 			break;
1109 		}
1110 	}
1111 
1112 	return subtype;
1113 }
1114 
1115 /**
1116  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1117  *            of EAPOL packet.
1118  * @data: Pointer to EAPOL packet data buffer
1119  *
1120  * This func. returns the subtype of EAPOL packet.
1121  *
1122  * Return: subtype of the EAPOL packet.
1123  */
1124 enum qdf_proto_subtype
1125 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1126 {
1127 	uint16_t eapol_key_info;
1128 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1129 	uint16_t mask;
1130 
1131 	eapol_key_info = (uint16_t)(*(uint16_t *)
1132 			(data + EAPOL_KEY_INFO_OFFSET));
1133 
1134 	mask = eapol_key_info & EAPOL_MASK;
1135 	switch (mask) {
1136 	case EAPOL_M1_BIT_MASK:
1137 		subtype = QDF_PROTO_EAPOL_M1;
1138 		break;
1139 	case EAPOL_M2_BIT_MASK:
1140 		subtype = QDF_PROTO_EAPOL_M2;
1141 		break;
1142 	case EAPOL_M3_BIT_MASK:
1143 		subtype = QDF_PROTO_EAPOL_M3;
1144 		break;
1145 	case EAPOL_M4_BIT_MASK:
1146 		subtype = QDF_PROTO_EAPOL_M4;
1147 		break;
1148 	default:
1149 		break;
1150 	}
1151 
1152 	return subtype;
1153 }
1154 
1155 /**
1156  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1157  *            of ARP packet.
1158  * @data: Pointer to ARP packet data buffer
1159  *
1160  * This func. returns the subtype of ARP packet.
1161  *
1162  * Return: subtype of the ARP packet.
1163  */
1164 enum qdf_proto_subtype
1165 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1166 {
1167 	uint16_t subtype;
1168 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1169 
1170 	subtype = (uint16_t)(*(uint16_t *)
1171 			(data + ARP_SUB_TYPE_OFFSET));
1172 
1173 	switch (QDF_SWAP_U16(subtype)) {
1174 	case ARP_REQUEST:
1175 		proto_subtype = QDF_PROTO_ARP_REQ;
1176 		break;
1177 	case ARP_RESPONSE:
1178 		proto_subtype = QDF_PROTO_ARP_RES;
1179 		break;
1180 	default:
1181 		break;
1182 	}
1183 
1184 	return proto_subtype;
1185 }
1186 
1187 /**
1188  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1189  *            of IPV4 ICMP packet.
1190  * @data: Pointer to IPV4 ICMP packet data buffer
1191  *
1192  * This func. returns the subtype of ICMP packet.
1193  *
1194  * Return: subtype of the ICMP packet.
1195  */
1196 enum qdf_proto_subtype
1197 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1198 {
1199 	uint8_t subtype;
1200 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1201 
1202 	subtype = (uint8_t)(*(uint8_t *)
1203 			(data + ICMP_SUBTYPE_OFFSET));
1204 
1205 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
1206 		"ICMP proto type: 0x%02x", subtype);
1207 
1208 	switch (subtype) {
1209 	case ICMP_REQUEST:
1210 		proto_subtype = QDF_PROTO_ICMP_REQ;
1211 		break;
1212 	case ICMP_RESPONSE:
1213 		proto_subtype = QDF_PROTO_ICMP_RES;
1214 		break;
1215 	default:
1216 		break;
1217 	}
1218 
1219 	return proto_subtype;
1220 }
1221 
1222 /**
1223  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1224  *            of IPV6 ICMPV6 packet.
1225  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1226  *
1227  * This func. returns the subtype of ICMPV6 packet.
1228  *
1229  * Return: subtype of the ICMPV6 packet.
1230  */
1231 enum qdf_proto_subtype
1232 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1233 {
1234 	uint8_t subtype;
1235 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1236 
1237 	subtype = (uint8_t)(*(uint8_t *)
1238 			(data + ICMPV6_SUBTYPE_OFFSET));
1239 
1240 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
1241 		"ICMPv6 proto type: 0x%02x", subtype);
1242 
1243 	switch (subtype) {
1244 	case ICMPV6_REQUEST:
1245 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1246 		break;
1247 	case ICMPV6_RESPONSE:
1248 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1249 		break;
1250 	case ICMPV6_RS:
1251 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1252 		break;
1253 	case ICMPV6_RA:
1254 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1255 		break;
1256 	case ICMPV6_NS:
1257 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1258 		break;
1259 	case ICMPV6_NA:
1260 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1261 		break;
1262 	default:
1263 		break;
1264 	}
1265 
1266 	return proto_subtype;
1267 }
1268 
1269 /**
1270  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1271  *            of IPV4 packet.
1272  * @data: Pointer to IPV4 packet data buffer
1273  *
1274  * This func. returns the proto type of IPV4 packet.
1275  *
1276  * Return: proto type of IPV4 packet.
1277  */
1278 uint8_t
1279 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1280 {
1281 	uint8_t proto_type;
1282 
1283 	proto_type = (uint8_t)(*(uint8_t *)(data +
1284 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1285 	return proto_type;
1286 }
1287 
1288 /**
1289  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1290  *            of IPV6 packet.
1291  * @data: Pointer to IPV6 packet data buffer
1292  *
1293  * This func. returns the proto type of IPV6 packet.
1294  *
1295  * Return: proto type of IPV6 packet.
1296  */
1297 uint8_t
1298 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1299 {
1300 	uint8_t proto_type;
1301 
1302 	proto_type = (uint8_t)(*(uint8_t *)(data +
1303 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1304 	return proto_type;
1305 }
1306 
1307 /**
1308  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1309  * @data: Pointer to network data
1310  *
1311  * This api is for Tx packets.
1312  *
1313  * Return: true if packet is ipv4 packet
1314  *	   false otherwise
1315  */
1316 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1317 {
1318 	uint16_t ether_type;
1319 
1320 	ether_type = (uint16_t)(*(uint16_t *)(data +
1321 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1322 
1323 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1324 		return true;
1325 	else
1326 		return false;
1327 }
1328 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv4_pkt);
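
/*
 * Illustrative sketch (assumption, not from a real caller): the classifier
 * helpers in this file are typically chained on a frame's data pointer, e.g.:
 *
 *	if (__qdf_nbuf_data_is_ipv4_pkt(data) &&
 *	    __qdf_nbuf_data_is_ipv4_dhcp_pkt(data))
 *		subtype = __qdf_nbuf_data_get_dhcp_subtype(data);
 */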
1329 
1330 /**
1331  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1332  * @data: Pointer to network data buffer
1333  *
1334  * This api is for ipv4 packet.
1335  *
1336  * Return: true if packet is DHCP packet
1337  *	   false otherwise
1338  */
1339 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1340 {
1341 	uint16_t sport;
1342 	uint16_t dport;
1343 
1344 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1345 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1346 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1347 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1348 					 sizeof(uint16_t)));
1349 
1350 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1351 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1352 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1353 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1354 		return true;
1355 	else
1356 		return false;
1357 }
1358 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1359 
1360 /**
1361  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1362  * @data: Pointer to network data buffer
1363  *
1364  * This api is for ipv4 packet.
1365  *
1366  * Return: true if packet is EAPOL packet
1367  *	   false otherwise.
1368  */
1369 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1370 {
1371 	uint16_t ether_type;
1372 
1373 	ether_type = (uint16_t)(*(uint16_t *)(data +
1374 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1375 
1376 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1377 		return true;
1378 	else
1379 		return false;
1380 }
1381 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1382 
1383 /**
1384  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1385  * @skb: Pointer to network buffer
1386  *
1387  * This api is for ipv4 packet.
1388  *
1389  * Return: true if packet is WAPI packet
1390  *	   false otherwise.
1391  */
1392 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1393 {
1394 	uint16_t ether_type;
1395 
1396 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1397 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1398 
1399 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1400 		return true;
1401 	else
1402 		return false;
1403 }
1404 EXPORT_SYMBOL(__qdf_nbuf_is_ipv4_wapi_pkt);
1405 
1406 /**
1407  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1408  * @skb: Pointer to network buffer
1409  *
1410  * This api is for ipv4 packet.
1411  *
1412  * Return: true if packet is tdls packet
1413  *	   false otherwise.
1414  */
1415 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1416 {
1417 	uint16_t ether_type;
1418 
1419 	ether_type = *(uint16_t *)(skb->data +
1420 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1421 
1422 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1423 		return true;
1424 	else
1425 		return false;
1426 }
1427 EXPORT_SYMBOL(__qdf_nbuf_is_ipv4_tdls_pkt);
1428 
1429 /**
1430  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1431  * @data: Pointer to network data buffer
1432  *
1433  * This api is for ipv4 packet.
1434  *
1435  * Return: true if packet is ARP packet
1436  *	   false otherwise.
1437  */
1438 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1439 {
1440 	uint16_t ether_type;
1441 
1442 	ether_type = (uint16_t)(*(uint16_t *)(data +
1443 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1444 
1445 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1446 		return true;
1447 	else
1448 		return false;
1449 }
1450 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv4_arp_pkt);
1451 
1452 /**
1453  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1454  * @data: Pointer to network data buffer
1455  *
1456  * This api is for ipv4 packet.
1457  *
1458  * Return: true if packet is ARP request
1459  *	   false otherwise.
1460  */
1461 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1462 {
1463 	uint16_t op_code;
1464 
1465 	op_code = (uint16_t)(*(uint16_t *)(data +
1466 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1467 
1468 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1469 		return true;
1470 	return false;
1471 }
1472 
1473 /**
1474  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1475  * @data: Pointer to network data buffer
1476  *
1477  * This api is for ipv4 packet.
1478  *
1479  * Return: true if packet is ARP response
1480  *	   false otherwise.
1481  */
1482 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1483 {
1484 	uint16_t op_code;
1485 
1486 	op_code = (uint16_t)(*(uint16_t *)(data +
1487 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1488 
1489 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1490 		return true;
1491 	return false;
1492 }
1493 
1494 /**
1495  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1496  * @data: Pointer to network data buffer
1497  *
1498  * This api is for ipv4 packet.
1499  *
1500  * Return: ARP packet source IP value.
1501  */
1502 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1503 {
1504 	uint32_t src_ip;
1505 
1506 	src_ip = (uint32_t)(*(uint32_t *)(data +
1507 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1508 
1509 	return src_ip;
1510 }
1511 
1512 /**
1513  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1514  * @data: Pointer to network data buffer
1515  *
1516  * This api is for ipv4 packet.
1517  *
1518  * Return: ARP packet target IP value.
1519  */
1520 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1521 {
1522 	uint32_t tgt_ip;
1523 
1524 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1525 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1526 
1527 	return tgt_ip;
1528 }
1529 
1530 /**
1531  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1532  * @data: Pointer to network data buffer
1533  * @len: length to copy
1534  *
1535  * This api returns a pointer to the DNS domain name within the packet.
1536  *
1537  * Return: dns domain name.
1538  */
1539 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1540 {
1541 	uint8_t *domain_name;
1542 
1543 	domain_name = (uint8_t *)
1544 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1545 	return domain_name;
1546 }
1547 
1548 
1549 /**
1550  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1551  * @data: Pointer to network data buffer
1552  *
1553  * This api is for dns query packet.
1554  *
1555  * Return: true if packet is dns query packet.
1556  *	   false otherwise.
1557  */
1558 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1559 {
1560 	uint16_t op_code;
1561 	uint16_t tgt_port;
1562 
1563 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1564 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1565 	/* A standard DNS query always goes to destination port 53. */
1566 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1567 		op_code = (uint16_t)(*(uint16_t *)(data +
1568 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1569 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1570 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1571 			return true;
1572 	}
1573 	return false;
1574 }
1575 
1576 /**
1577  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1578  * @data: Pointer to network data buffer
1579  *
1580  * This api is for dns query response.
1581  *
1582  * Return: true if packet is dns response packet.
1583  *	   false otherwise.
1584  */
1585 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1586 {
1587 	uint16_t op_code;
1588 	uint16_t src_port;
1589 
1590 	src_port = (uint16_t)(*(uint16_t *)(data +
1591 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1592 	/* Standard DNS response always comes on Src Port 53. */
1593 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1594 		op_code = (uint16_t)(*(uint16_t *)(data +
1595 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1596 
1597 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1598 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1599 			return true;
1600 	}
1601 	return false;
1602 }
1603 
1604 /**
1605  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1606  * @data: Pointer to network data buffer
1607  *
1608  * This api is for tcp syn packet.
1609  *
1610  * Return: true if packet is tcp syn packet.
1611  *	   false otherwise.
1612  */
1613 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1614 {
1615 	uint8_t op_code;
1616 
1617 	op_code = (uint8_t)(*(uint8_t *)(data +
1618 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1619 
1620 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1621 		return true;
1622 	return false;
1623 }
1624 
1625 /**
1626  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1627  * @data: Pointer to network data buffer
1628  *
1629  * This api is for tcp syn ack packet.
1630  *
1631  * Return: true if packet is tcp syn ack packet.
1632  *	   false otherwise.
1633  */
1634 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1635 {
1636 	uint8_t op_code;
1637 
1638 	op_code = (uint8_t)(*(uint8_t *)(data +
1639 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1640 
1641 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1642 		return true;
1643 	return false;
1644 }
1645 
1646 /**
1647  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1648  * @data: Pointer to network data buffer
1649  *
1650  * This api is for tcp ack packet.
1651  *
1652  * Return: true if packet is tcp ack packet.
1653  *	   false otherwise.
1654  */
1655 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1656 {
1657 	uint8_t op_code;
1658 
1659 	op_code = (uint8_t)(*(uint8_t *)(data +
1660 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1661 
1662 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1663 		return true;
1664 	return false;
1665 }
1666 
1667 /**
1668  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1669  * @data: Pointer to network data buffer
1670  *
1671  * This api is for tcp packet.
1672  *
1673  * Return: tcp source port value.
1674  */
1675 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1676 {
1677 	uint16_t src_port;
1678 
1679 	src_port = (uint16_t)(*(uint16_t *)(data +
1680 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1681 
1682 	return src_port;
1683 }
1684 
1685 /**
1686  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1687  * @data: Pointer to network data buffer
1688  *
1689  * This api is for tcp packet.
1690  *
1691  * Return: tcp destination port value.
1692  */
1693 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1694 {
1695 	uint16_t tgt_port;
1696 
1697 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1698 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1699 
1700 	return tgt_port;
1701 }
1702 
1703 /**
1704  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1705  * @data: Pointer to network data buffer
1706  *
1707  * This api is for ipv4 req packet.
1708  *
1709  * Return: true if packet is icmpv4 request
1710  *	   false otherwise.
1711  */
1712 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1713 {
1714 	uint8_t op_code;
1715 
1716 	op_code = (uint8_t)(*(uint8_t *)(data +
1717 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1718 
1719 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1720 		return true;
1721 	return false;
1722 }
1723 
1724 /**
1725  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1726  * @data: Pointer to network data buffer
1727  *
1728  * This api is for ipv4 res packet.
1729  *
1730  * Return: true if packet is icmpv4 response
1731  *	   false otherwise.
1732  */
1733 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1734 {
1735 	uint8_t op_code;
1736 
1737 	op_code = (uint8_t)(*(uint8_t *)(data +
1738 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1739 
1740 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1741 		return true;
1742 	return false;
1743 }
1744 
1745 /**
1746  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1747  * @data: Pointer to network data buffer
1748  *
1749  * This api is for ipv4 packet.
1750  *
1751  * Return: icmpv4 packet source IP value.
1752  */
1753 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1754 {
1755 	uint32_t src_ip;
1756 
1757 	src_ip = (uint32_t)(*(uint32_t *)(data +
1758 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1759 
1760 	return src_ip;
1761 }
1762 
1763 /**
1764  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1765  * @data: Pointer to network data buffer
1766  *
1767  * This api is for ipv4 packet.
1768  *
1769  * Return: icmpv4 packet target IP value.
1770  */
1771 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1772 {
1773 	uint32_t tgt_ip;
1774 
1775 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1776 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1777 
1778 	return tgt_ip;
1779 }
1780 
1781 
1782 /**
1783  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1784  * @data: Pointer to IPV6 packet data buffer
1785  *
1786  * This func. checks whether it is a IPV6 packet or not.
1787  *
1788  * Return: TRUE if it is a IPV6 packet
1789  *         FALSE if not
1790  */
1791 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1792 {
1793 	uint16_t ether_type;
1794 
1795 	ether_type = (uint16_t)(*(uint16_t *)(data +
1796 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1797 
1798 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1799 		return true;
1800 	else
1801 		return false;
1802 }
1803 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv6_pkt);
1804 
1805 /**
1806  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1807  * @data: Pointer to network data buffer
1808  *
1809  * This api is for ipv6 packet.
1810  *
1811  * Return: true if packet is DHCP packet
1812  *	   false otherwise
1813  */
1814 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1815 {
1816 	uint16_t sport;
1817 	uint16_t dport;
1818 
1819 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1820 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1821 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1822 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1823 					sizeof(uint16_t));
1824 
1825 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1826 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1827 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1828 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1829 		return true;
1830 	else
1831 		return false;
1832 }
1833 EXPORT_SYMBOL(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1834 
1835 /**
1836  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1837  * @data: Pointer to IPV4 packet data buffer
1838  *
1839  * This func. checks whether it is a IPV4 multicast packet or not.
1840  *
1841  * Return: TRUE if it is a IPV4 multicast packet
1842  *         FALSE if not
1843  */
1844 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1845 {
1846 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1847 		uint32_t *dst_addr =
1848 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1849 
1850 		/*
1851 		 * Check the upper nibble of the first octet of the IPv4
1852 		 * destination address; 0xE (1110b) marks a multicast address.
1853 		 */
1854 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1855 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1856 			return true;
1857 		else
1858 			return false;
1859 	} else
1860 		return false;
1861 }
1862 
1863 /**
1864  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1865  * @data: Pointer to IPV6 packet data buffer
1866  *
1867  * This func. checks whether it is a IPV6 multicast packet or not.
1868  *
1869  * Return: TRUE if it is a IPV6 multicast packet
1870  *         FALSE if not
1871  */
1872 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1873 {
1874 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1875 		uint16_t *dst_addr;
1876 
1877 		dst_addr = (uint16_t *)
1878 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1879 
1880 		/*
1881 		 * Check the first 16 bits of the IPv6 destination address;
1882 		 * 0xFF00 indicates an IPv6 multicast packet.
1883 		 */
1884 		if (*dst_addr ==
1885 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1886 			return true;
1887 		else
1888 			return false;
1889 	} else
1890 		return false;
1891 }
1892 
1893 /**
1894  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1895  * @data: Pointer to IPV4 ICMP packet data buffer
1896  *
1897  * This func. checks whether it is a ICMP packet or not.
1898  *
1899  * Return: TRUE if it is a ICMP packet
1900  *         FALSE if not
1901  */
1902 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1903 {
1904 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1905 		uint8_t pkt_type;
1906 
1907 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1908 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1909 
1910 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1911 			return true;
1912 		else
1913 			return false;
1914 	} else
1915 		return false;
1916 }
1917 
1918 /**
1919  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1920  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1921  *
1922  * This func. checks whether it is a ICMPV6 packet or not.
1923  *
1924  * Return: TRUE if it is a ICMPV6 packet
1925  *         FALSE if not
1926  */
1927 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1928 {
1929 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1930 		uint8_t pkt_type;
1931 
1932 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1933 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1934 
1935 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1936 			return true;
1937 		else
1938 			return false;
1939 	} else
1940 		return false;
1941 }
1942 
1943 /**
1944  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1945  * @data: Pointer to IPV4 UDP packet data buffer
1946  *
1947  * This func. checks whether it is a IPV4 UDP packet or not.
1948  *
1949  * Return: TRUE if it is a IPV4 UDP packet
1950  *         FALSE if not
1951  */
1952 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1953 {
1954 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1955 		uint8_t pkt_type;
1956 
1957 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1958 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1959 
1960 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1961 			return true;
1962 		else
1963 			return false;
1964 	} else
1965 		return false;
1966 }
1967 
1968 /**
1969  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1970  * @data: Pointer to IPV4 TCP packet data buffer
1971  *
1972  * This func. checks whether it is a IPV4 TCP packet or not.
1973  *
1974  * Return: TRUE if it is a IPV4 TCP packet
1975  *         FALSE if not
1976  */
1977 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1978 {
1979 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1980 		uint8_t pkt_type;
1981 
1982 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1983 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1984 
1985 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1986 			return true;
1987 		else
1988 			return false;
1989 	} else
1990 		return false;
1991 }
1992 
1993 /**
1994  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
1995  * @data: Pointer to IPV6 UDP packet data buffer
1996  *
1997  * This func. checks whether it is a IPV6 UDP packet or not.
1998  *
1999  * Return: TRUE if it is a IPV6 UDP packet
2000  *         FALSE if not
2001  */
2002 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2003 {
2004 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2005 		uint8_t pkt_type;
2006 
2007 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2008 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2009 
2010 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2011 			return true;
2012 		else
2013 			return false;
2014 	} else
2015 		return false;
2016 }
2017 
2018 /**
2019  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPV6 TCP packet
2020  * @data: Pointer to IPV6 TCP packet data buffer
2021  *
2022  * This function checks whether the packet is an IPV6 TCP packet.
2023  *
2024  * Return: TRUE if it is an IPV6 TCP packet
2025  *         FALSE if not
2026  */
2027 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2028 {
2029 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2030 		uint8_t pkt_type;
2031 
2032 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2033 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2034 
2035 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2036 			return true;
2037 		else
2038 			return false;
2039 	} else
2040 		return false;
2041 }
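
/*
 * Illustrative usage only (not part of this file's API): a datapath caller
 * might combine the helpers above; qdf_nbuf_data() is the usual way to get
 * at the Ethernet header, and classify_frame()/FRAME_TYPE_* below are
 * hypothetical caller-side names used purely for the sketch.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data) ||
 *	    __qdf_nbuf_data_is_ipv6_tcp_pkt(data))
 *		classify_frame(nbuf, FRAME_TYPE_TCP);
 *	else if (__qdf_nbuf_data_is_icmp_pkt(data) ||
 *		 __qdf_nbuf_data_is_icmpv6_pkt(data))
 *		classify_frame(nbuf, FRAME_TYPE_ICMP);
 */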
2042 
2043 /**
2044  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2045  * @nbuf: sk buff
2046  *
2047  * Return: true if packet is broadcast
2048  *	   false otherwise
2049  */
2050 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2051 {
2052 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2053 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2054 }
2055 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2056 
2057 #ifdef MEMORY_DEBUG
2058 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2059 
2060 /**
2061  * struct qdf_nbuf_track_t - Network buffer track structure
2062  *
2063  * @p_next: Pointer to the next tracking node in the bucket/freelist
2064  * @net_buf: Pointer to the tracked network buffer
2065  * @file_name: Name of the file where the buffer was allocated
2066  * @line_num: Line number of the allocation
2067  * @size: Size of the tracked network buffer
2068  */
2069 struct qdf_nbuf_track_t {
2070 	struct qdf_nbuf_track_t *p_next;
2071 	qdf_nbuf_t net_buf;
2072 	uint8_t *file_name;
2073 	uint32_t line_num;
2074 	size_t size;
2075 };
2076 
2077 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2078 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2079 
2080 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2081 static struct kmem_cache *nbuf_tracking_cache;
2082 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2083 static spinlock_t qdf_net_buf_track_free_list_lock;
2084 static uint32_t qdf_net_buf_track_free_list_count;
2085 static uint32_t qdf_net_buf_track_used_list_count;
2086 static uint32_t qdf_net_buf_track_max_used;
2087 static uint32_t qdf_net_buf_track_max_free;
2088 static uint32_t qdf_net_buf_track_max_allocated;
2089 
2090 /**
2091  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2092  *
2093  * tracks the max number of network buffers the wlan driver was tracking at
2094  * any one time, and the max total number of tracking nodes ever allocated.
2095  *
2096  * Return: none
2097  */
2098 static inline void update_max_used(void)
2099 {
2100 	int sum;
2101 
2102 	if (qdf_net_buf_track_max_used <
2103 	    qdf_net_buf_track_used_list_count)
2104 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2105 	sum = qdf_net_buf_track_free_list_count +
2106 		qdf_net_buf_track_used_list_count;
2107 	if (qdf_net_buf_track_max_allocated < sum)
2108 		qdf_net_buf_track_max_allocated = sum;
2109 }
2110 
2111 /**
2112  * update_max_free() - update qdf_net_buf_track_max_free
2113  *
2114  * tracks the max number of tracking buffers kept on the freelist.
2115  *
2116  * Return: none
2117  */
2118 static inline void update_max_free(void)
2119 {
2120 	if (qdf_net_buf_track_max_free <
2121 	    qdf_net_buf_track_free_list_count)
2122 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2123 }
2124 
2125 /**
2126  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2127  *
2128  * This function pulls from a freelist if possible, falling back to
2129  * kmem_cache_alloc. It also adds flexibility to adjust the allocation and
2130  * freelist schemes.
2131  *
2132  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed
2133  */
2134 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2135 {
2136 	int flags = GFP_KERNEL;
2137 	unsigned long irq_flag;
2138 	QDF_NBUF_TRACK *new_node = NULL;
2139 
2140 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2141 	qdf_net_buf_track_used_list_count++;
2142 	if (qdf_net_buf_track_free_list != NULL) {
2143 		new_node = qdf_net_buf_track_free_list;
2144 		qdf_net_buf_track_free_list =
2145 			qdf_net_buf_track_free_list->p_next;
2146 		qdf_net_buf_track_free_list_count--;
2147 	}
2148 	update_max_used();
2149 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2150 
2151 	if (new_node != NULL)
2152 		return new_node;
2153 
2154 	if (in_interrupt() || irqs_disabled() || in_atomic())
2155 		flags = GFP_ATOMIC;
2156 
2157 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2158 }
2159 
2160 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2161 #define FREEQ_POOLSIZE 2048
2162 
2163 /**
2164  * qdf_nbuf_track_free() - free the nbuf tracking cookie
2165  * @node: nbuf tracking cookie to be freed
2166  *
2167  * Matches calls to qdf_nbuf_track_alloc. Either frees the tracking cookie
2168  * back to the kernel or to an internal freelist, based on the freelist size.
2169  *
2170  * Return: none
2171  */
2172 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2173 {
2174 	unsigned long irq_flag;
2175 
2176 	if (!node)
2177 		return;
2178 
2179 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE,
2180 	 * but only shrink it if it is bigger than twice the number of
2181 	 * nbufs in use. If the driver is stalling in a consistent bursty
2182 	 * fashion, this will keep 3/4 of the allocations on the free list
2183 	 * while also allowing the system to recover memory as less frantic
2184 	 * traffic occurs.
2185 	 */
2186 
2187 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2188 
2189 	qdf_net_buf_track_used_list_count--;
2190 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2191 	   (qdf_net_buf_track_free_list_count >
2192 	    qdf_net_buf_track_used_list_count << 1)) {
2193 		kmem_cache_free(nbuf_tracking_cache, node);
2194 	} else {
2195 		node->p_next = qdf_net_buf_track_free_list;
2196 		qdf_net_buf_track_free_list = node;
2197 		qdf_net_buf_track_free_list_count++;
2198 	}
2199 	update_max_free();
2200 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2201 }
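
/*
 * Worked example of the shrink heuristic above (illustrative numbers):
 * with 3000 nodes on the freelist and 1000 in use, 3000 > FREEQ_POOLSIZE
 * (2048) and 3000 > 2 * 1000, so a freed node is returned to the
 * kmem_cache; with 3000 free and 2000 in use, the second condition fails
 * and the node is kept on the freelist instead.
 */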
2202 
2203 /**
2204  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2205  *
2206  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2207  * the freelist first makes it performant for the first iperf udp burst
2208  * as well as steady state.
2209  *
2210  * Return: None
2211  */
2212 static void qdf_nbuf_track_prefill(void)
2213 {
2214 	int i;
2215 	QDF_NBUF_TRACK *node, *head;
2216 
2217 	/* prepopulate the freelist */
2218 	head = NULL;
2219 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2220 		node = qdf_nbuf_track_alloc();
2221 		if (node == NULL)
2222 			continue;
2223 		node->p_next = head;
2224 		head = node;
2225 	}
2226 	while (head) {
2227 		node = head->p_next;
2228 		qdf_nbuf_track_free(head);
2229 		head = node;
2230 	}
2231 
2232 	/* prefilled buffers should not count as used */
2233 	qdf_net_buf_track_max_used = 0;
2234 }
2235 
2236 /**
2237  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2238  *
2239  * This initializes the memory manager for the nbuf tracking cookies.  Because
2240  * these cookies are all the same size and only used in this feature, we can
2241  * use a kmem_cache to provide tracking as well as to speed up allocations.
2242  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2243  * features) a freelist is prepopulated here.
2244  *
2245  * Return: None
2246  */
2247 static void qdf_nbuf_track_memory_manager_create(void)
2248 {
2249 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2250 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2251 						sizeof(QDF_NBUF_TRACK),
2252 						0, 0, NULL);
2253 
2254 	qdf_nbuf_track_prefill();
2255 }
2256 
2257 /**
2258  * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking
2259  * cookie memory manager
2260  *
2261  * Empties the freelist and prints usage statistics when no longer needed.
2262  * The kmem_cache is destroyed so it can warn about leaked tracking cookies.
2263  *
2264  * Return: None
2265  */
2266 static void qdf_nbuf_track_memory_manager_destroy(void)
2267 {
2268 	QDF_NBUF_TRACK *node, *tmp;
2269 	unsigned long irq_flag;
2270 
2271 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2272 	node = qdf_net_buf_track_free_list;
2273 
2274 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2275 		qdf_print("%s: unexpectedly large max_used count %d",
2276 			  __func__, qdf_net_buf_track_max_used);
2277 
2278 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2279 		qdf_print("%s: %d unused trackers were allocated",
2280 			  __func__,
2281 			  qdf_net_buf_track_max_allocated -
2282 			  qdf_net_buf_track_max_used);
2283 
2284 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2285 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2286 		qdf_print("%s: check freelist shrinking functionality",
2287 			  __func__);
2288 
2289 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2290 		  "%s: %d residual freelist size\n",
2291 		  __func__, qdf_net_buf_track_free_list_count);
2292 
2293 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2294 		  "%s: %d max freelist size observed\n",
2295 		  __func__, qdf_net_buf_track_max_free);
2296 
2297 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2298 		  "%s: %d max buffers used observed\n",
2299 		  __func__, qdf_net_buf_track_max_used);
2300 
2301 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2302 		  "%s: %d max buffers allocated observed\n",
2303 		  __func__, qdf_net_buf_track_max_allocated);
2304 
2305 	while (node) {
2306 		tmp = node;
2307 		node = node->p_next;
2308 		kmem_cache_free(nbuf_tracking_cache, tmp);
2309 		qdf_net_buf_track_free_list_count--;
2310 	}
2311 
2312 	if (qdf_net_buf_track_free_list_count != 0)
2313 		qdf_print("%s: %d unfreed tracking memory lost in freelist\n",
2314 			  __func__, qdf_net_buf_track_free_list_count);
2315 
2316 	if (qdf_net_buf_track_used_list_count != 0)
2317 		qdf_print("%s: %d unfreed tracking memory still in use\n",
2318 			  __func__, qdf_net_buf_track_used_list_count);
2319 
2320 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2321 	kmem_cache_destroy(nbuf_tracking_cache);
2322 	qdf_net_buf_track_free_list = NULL;
2323 }
2324 
2325 /**
2326  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2327  *
2328  * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
2329  * driver in a hash table, and reports leaked SKBs when the driver is unloaded.
2330  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2331  * supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
2332  * reported as a memory leak.
2333  *
2334  * Return: none
2335  */
2336 void qdf_net_buf_debug_init(void)
2337 {
2338 	uint32_t i;
2339 
2340 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2341 
2342 	qdf_nbuf_map_tracking_init();
2343 	qdf_nbuf_track_memory_manager_create();
2344 
2345 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2346 		gp_qdf_net_buf_track_tbl[i] = NULL;
2347 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2348 	}
2349 }
2350 EXPORT_SYMBOL(qdf_net_buf_debug_init);
2351 
2352 /**
2353  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2354  *
2355  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2356  * As part of exiting the functionality, free the leaked memory and
2357  * clean up the tracking buffers.
2358  *
2359  * Return: none
2360  */
2361 void qdf_net_buf_debug_exit(void)
2362 {
2363 	uint32_t i;
2364 	uint32_t count = 0;
2365 	unsigned long irq_flag;
2366 	QDF_NBUF_TRACK *p_node;
2367 	QDF_NBUF_TRACK *p_prev;
2368 
2369 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2370 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2371 		p_node = gp_qdf_net_buf_track_tbl[i];
2372 		while (p_node) {
2373 			p_prev = p_node;
2374 			p_node = p_node->p_next;
2375 			count++;
2376 			qdf_print("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK\n",
2377 				  p_prev->file_name, p_prev->line_num,
2378 				  p_prev->size, p_prev->net_buf);
2379 			qdf_nbuf_track_free(p_prev);
2380 		}
2381 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2382 	}
2383 
2384 	qdf_nbuf_track_memory_manager_destroy();
2385 	qdf_nbuf_map_tracking_deinit();
2386 
2387 #ifdef CONFIG_HALT_KMEMLEAK
2388 	if (count) {
2389 		qdf_print("%d SKBs leaked .. please fix the SKB leak", count);
2390 		QDF_BUG(0);
2391 	}
2392 #endif
2393 }
2394 EXPORT_SYMBOL(qdf_net_buf_debug_exit);
2395 
2396 /**
2397  * qdf_net_buf_debug_hash() - hash network buffer pointer
2398  *
2399  * Return: hash value
2400  */
2401 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2402 {
2403 	uint32_t i;
2404 
2405 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2406 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2407 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2408 
2409 	return i;
2410 }
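
/*
 * Example (purely illustrative pointer value): for net_buf == 0x4000,
 * (0x4000 >> 4) + (0x4000 >> 14) = 1024 + 1 = 1025, and
 * 1025 & (QDF_NET_BUF_TRACK_MAX_SIZE - 1) = 1, so that buffer would be
 * tracked in bucket 1 of gp_qdf_net_buf_track_tbl.
 */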
2411 
2412 /**
2413  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2414  *
2415  * Return: If skb is found in hash table then return pointer to network buffer
2416  *	else return %NULL
2417  */
2418 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2419 {
2420 	uint32_t i;
2421 	QDF_NBUF_TRACK *p_node;
2422 
2423 	i = qdf_net_buf_debug_hash(net_buf);
2424 	p_node = gp_qdf_net_buf_track_tbl[i];
2425 
2426 	while (p_node) {
2427 		if (p_node->net_buf == net_buf)
2428 			return p_node;
2429 		p_node = p_node->p_next;
2430 	}
2431 
2432 	return NULL;
2433 }
2434 
2435 /**
2436  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2437  *
2438  * Return: none
2439  */
2440 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2441 				uint8_t *file_name, uint32_t line_num)
2442 {
2443 	uint32_t i;
2444 	unsigned long irq_flag;
2445 	QDF_NBUF_TRACK *p_node;
2446 	QDF_NBUF_TRACK *new_node;
2447 
2448 	new_node = qdf_nbuf_track_alloc();
2449 
2450 	i = qdf_net_buf_debug_hash(net_buf);
2451 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2452 
2453 	p_node = qdf_net_buf_debug_look_up(net_buf);
2454 
2455 	if (p_node) {
2456 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2457 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2458 			  net_buf, file_name, line_num);
2459 		qdf_nbuf_track_free(new_node);
2460 	} else {
2461 		p_node = new_node;
2462 		if (p_node) {
2463 			p_node->net_buf = net_buf;
2464 			p_node->file_name = file_name;
2465 			p_node->line_num = line_num;
2466 			p_node->size = size;
2467 			qdf_mem_skb_inc(size);
2468 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2469 			gp_qdf_net_buf_track_tbl[i] = p_node;
2470 		} else
2471 			qdf_print(
2472 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2473 				  file_name, line_num, size);
2474 	}
2475 
2476 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2477 }
2478 EXPORT_SYMBOL(qdf_net_buf_debug_add_node);
2479 
2480 /**
2481  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2482  *
2483  * Return: none
2484  */
2485 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2486 {
2487 	uint32_t i;
2488 	QDF_NBUF_TRACK *p_head;
2489 	QDF_NBUF_TRACK *p_node = NULL;
2490 	unsigned long irq_flag;
2491 	QDF_NBUF_TRACK *p_prev;
2492 
2493 	i = qdf_net_buf_debug_hash(net_buf);
2494 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2495 
2496 	p_head = gp_qdf_net_buf_track_tbl[i];
2497 
2498 	/* Unallocated SKB */
2499 	if (!p_head)
2500 		goto done;
2501 
2502 	p_node = p_head;
2503 	/* Found at head of the table */
2504 	if (p_head->net_buf == net_buf) {
2505 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2506 		goto done;
2507 	}
2508 
2509 	/* Search in collision list */
2510 	while (p_node) {
2511 		p_prev = p_node;
2512 		p_node = p_node->p_next;
2513 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2514 			p_prev->p_next = p_node->p_next;
2515 			break;
2516 		}
2517 	}
2518 
2519 done:
2520 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2521 
2522 	if (p_node) {
2523 		qdf_mem_skb_dec(p_node->size);
2524 		qdf_nbuf_track_free(p_node);
2525 	} else {
2526 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2527 			  net_buf);
2528 		QDF_BUG(0);
2529 	}
2530 }
2531 EXPORT_SYMBOL(qdf_net_buf_debug_delete_node);
2532 
2533 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2534 			uint8_t *file_name, uint32_t line_num)
2535 {
2536 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2537 
2538 	while (ext_list) {
2539 		/*
2540 		 * Take care to add if it is Jumbo packet connected using
2541 		 * frag_list
2542 		 */
2543 		qdf_nbuf_t next;
2544 
2545 		next = qdf_nbuf_queue_next(ext_list);
2546 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2547 		ext_list = next;
2548 	}
2549 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2550 }
2551 EXPORT_SYMBOL(qdf_net_buf_debug_acquire_skb);
2552 
2553 /**
2554  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2555  * @net_buf: Network buf holding head segment (single)
2556  *
2557  * WLAN driver modules whose allocated SKBs are freed by the network stack
2558  * are supposed to call this API before returning the SKB to the network
2559  * stack, so that the SKB is not reported as a memory leak.
2560  *
2561  * Return: none
2562  */
2563 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2564 {
2565 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2566 
2567 	while (ext_list) {
2568 		/*
2569 		 * Take care to free if it is Jumbo packet connected using
2570 		 * frag_list
2571 		 */
2572 		qdf_nbuf_t next;
2573 
2574 		next = qdf_nbuf_queue_next(ext_list);
2575 
2576 		if (qdf_nbuf_is_tso(ext_list) &&
2577 			qdf_nbuf_get_users(ext_list) > 1) {
2578 			ext_list = next;
2579 			continue;
2580 		}
2581 
2582 		qdf_net_buf_debug_delete_node(ext_list);
2583 		ext_list = next;
2584 	}
2585 
2586 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2587 		return;
2588 
2589 	qdf_net_buf_debug_delete_node(net_buf);
2590 }
2591 EXPORT_SYMBOL(qdf_net_buf_debug_release_skb);
2592 
2593 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2594 				int reserve, int align, int prio,
2595 				uint8_t *file, uint32_t line)
2596 {
2597 	qdf_nbuf_t nbuf;
2598 
2599 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio);
2600 
2601 	/* Store SKB in internal QDF tracking table */
2602 	if (qdf_likely(nbuf)) {
2603 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2604 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2605 	}
2606 
2607 	return nbuf;
2608 }
2609 qdf_export_symbol(qdf_nbuf_alloc_debug);
2610 
2611 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2612 {
2613 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2614 		goto free_buf;
2615 
2616 	/* Remove SKB from internal QDF tracking table */
2617 	if (qdf_likely(nbuf)) {
2618 		qdf_net_buf_debug_delete_node(nbuf);
2619 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2620 	}
2621 
2622 free_buf:
2623 	__qdf_nbuf_free(nbuf);
2624 }
2625 qdf_export_symbol(qdf_nbuf_free_debug);
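
/*
 * Usage sketch (assumes the public qdf_nbuf_alloc()/qdf_nbuf_free()
 * wrappers route to these _debug variants with __FILE__/__LINE__ when
 * MEMORY_DEBUG is enabled):
 *
 *	nbuf = qdf_nbuf_alloc(osdev, len, 0, 4, 0);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_nbuf_free(nbuf);
 */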
2626 
2627 #endif /*MEMORY_DEBUG */
2628 #if defined(FEATURE_TSO)
2629 
2630 /**
2631  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2632  *
2633  * @ethproto: ethernet type of the msdu
2634  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2635  * @l2_len: L2 length for the msdu
2636  * @eit_hdr: pointer to EIT header
2637  * @eit_hdr_len: EIT header length for the msdu
2638  * @eit_hdr_dma_map_addr: dma addr for EIT header
2639  * @tcphdr: pointer to tcp header
2640  * @ipv4_csum_en: ipv4 checksum enable
2641  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2642  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2643  * @ip_id: IP id
2644  * @tcp_seq_num: TCP sequence number
2645  *
2646  * This structure holds the TSO common info that is common
2647  * across all the TCP segments of the jumbo packet.
2648  */
2649 struct qdf_tso_cmn_seg_info_t {
2650 	uint16_t ethproto;
2651 	uint16_t ip_tcp_hdr_len;
2652 	uint16_t l2_len;
2653 	uint8_t *eit_hdr;
2654 	uint32_t eit_hdr_len;
2655 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2656 	struct tcphdr *tcphdr;
2657 	uint16_t ipv4_csum_en;
2658 	uint16_t tcp_ipv4_csum_en;
2659 	uint16_t tcp_ipv6_csum_en;
2660 	uint16_t ip_id;
2661 	uint32_t tcp_seq_num;
2662 };
2663 
2664 /**
2665  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2666  * information
2667  * @osdev: qdf device handle
2668  * @skb: skb buffer
2669  * @tso_info: Parameters common to all segments
2670  *
2671  * Get the TSO information that is common across all the TCP
2672  * segments of the jumbo packet
2673  *
2674  * Return: 0 - success 1 - failure
2675  */
2676 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2677 			struct sk_buff *skb,
2678 			struct qdf_tso_cmn_seg_info_t *tso_info)
2679 {
2680 	/* Get ethernet type and ethernet header length */
2681 	tso_info->ethproto = vlan_get_protocol(skb);
2682 
2683 	/* Determine whether this is an IPv4 or IPv6 packet */
2684 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2685 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2686 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2687 
2688 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2689 		tso_info->ipv4_csum_en = 1;
2690 		tso_info->tcp_ipv4_csum_en = 1;
2691 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2692 			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
2693 				 ipv4_hdr->protocol);
2694 			return 1;
2695 		}
2696 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2697 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2698 		tso_info->tcp_ipv6_csum_en = 1;
2699 	} else {
2700 		qdf_print("TSO: ethertype 0x%x is not supported!\n",
2701 			 tso_info->ethproto);
2702 		return 1;
2703 	}
2704 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2705 	tso_info->tcphdr = tcp_hdr(skb);
2706 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2707 	/* get pointer to the ethernet + IP + TCP header and their length */
2708 	tso_info->eit_hdr = skb->data;
2709 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2710 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2711 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2712 							tso_info->eit_hdr,
2713 							tso_info->eit_hdr_len,
2714 							DMA_TO_DEVICE);
2715 	if (unlikely(dma_mapping_error(osdev->dev,
2716 				       tso_info->eit_hdr_dma_map_addr))) {
2717 		qdf_print("DMA mapping error!\n");
2718 		qdf_assert(0);
2719 		return 1;
2720 	}
2721 
2722 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2723 		/* include IPv4 header length for IPV4 (total length) */
2724 		tso_info->ip_tcp_hdr_len =
2725 			tso_info->eit_hdr_len - tso_info->l2_len;
2726 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2727 		/* exclude IPv6 header length for IPv6 (payload length) */
2728 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2729 	}
2730 	/*
2731 	 * The length of the payload (application layer data) is added to
2732 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2733 	 * descriptor.
2734 	 */
2735 
2736 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2737 		tso_info->tcp_seq_num,
2738 		tso_info->eit_hdr_len,
2739 		tso_info->l2_len,
2740 		skb->len);
2741 	return 0;
2742 }
2743 
2744 
2745 /**
2746  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
2747  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
2748  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
2749  *
2750  * Return: N/A
2751  */
2752 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
2753 				      uint32_t *lo, uint32_t *hi)
2754 {
2755 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
2756 		*lo = lower_32_bits(dmaaddr);
2757 		*hi = upper_32_bits(dmaaddr);
2758 	} else {
2759 		*lo = dmaaddr;
2760 		*hi = 0;
2761 	}
2762 }
2763 EXPORT_SYMBOL(__qdf_dmaaddr_to_32s);
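
/*
 * Example: for dmaaddr == 0x123456789 on a platform with a 64-bit
 * qdf_dma_addr_t, *lo is set to 0x23456789 and *hi to 0x1; with a
 * 32-bit qdf_dma_addr_t, *hi is always 0.
 */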
2764 
2765 /**
2766  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2767  *
2768  * @curr_seg: Segment whose contents are initialized
2769  * @tso_cmn_info: Parameters common to all segments
2770  *
2771  * Return: None
2772  */
2773 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2774 				struct qdf_tso_seg_elem_t *curr_seg,
2775 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2776 {
2777 	/* Initialize the flags to 0 */
2778 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2779 
2780 	/*
2781 	 * The following fields remain the same across all segments of
2782 	 * a jumbo packet
2783 	 */
2784 	curr_seg->seg.tso_flags.tso_enable = 1;
2785 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2786 		tso_cmn_info->ipv4_csum_en;
2787 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2788 		tso_cmn_info->tcp_ipv6_csum_en;
2789 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2790 		tso_cmn_info->tcp_ipv4_csum_en;
2791 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2792 
2793 	/* The following fields change for the segments */
2794 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2795 	tso_cmn_info->ip_id++;
2796 
2797 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2798 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2799 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2800 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2801 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2802 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2803 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2804 
2805 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2806 
2807 	/*
2808 	 * First fragment for each segment always contains the ethernet,
2809 	 * IP and TCP header
2810 	 */
2811 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2812 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2813 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2814 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2815 
2816 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2817 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2818 		   tso_cmn_info->eit_hdr_len,
2819 		   curr_seg->seg.tso_flags.tcp_seq_num,
2820 		   curr_seg->seg.total_len);
2821 }
2822 
2823 /**
2824  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2825  * into segments
2826  * @nbuf: network buffer to be segmented
2827  * @osdev: qdf device handle
2828  * @skb: network buffer to be segmented
2829  * @tso_info: output; information about the TSO segments is populated here
2830  * This function fragments a TCP jumbo packet into smaller
2831  * segments to be transmitted by the driver. It chains the TSO
2832  * segments created into a list.
2833  *
2834  * Return: number of TSO segments
2835  */
2836 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2837 		struct qdf_tso_info_t *tso_info)
2838 {
2839 	/* common across all segments */
2840 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2841 	/* segment specific */
2842 	void *tso_frag_vaddr;
2843 	qdf_dma_addr_t tso_frag_paddr = 0;
2844 	uint32_t num_seg = 0;
2845 	struct qdf_tso_seg_elem_t *curr_seg;
2846 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2847 	struct skb_frag_struct *frag = NULL;
2848 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2849 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2850 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2851 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2852 	int j = 0; /* skb fragment index */
2853 
2854 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2855 
2856 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2857 						skb, &tso_cmn_info))) {
2858 		qdf_print("TSO: error getting common segment info\n");
2859 		return 0;
2860 	}
2861 
2862 	total_num_seg = tso_info->tso_num_seg_list;
2863 	curr_seg = tso_info->tso_seg_list;
2864 
2865 	/* length of the first chunk of data in the skb */
2866 	skb_frag_len = skb_headlen(skb);
2867 
2868 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2869 	/* update the remaining skb fragment length and TSO segment length */
2870 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2871 	skb_proc -= tso_cmn_info.eit_hdr_len;
2872 
2873 	/* get the address to the next tso fragment */
2874 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2875 	/* get the length of the next tso fragment */
2876 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2877 
2878 	if (tso_frag_len != 0) {
2879 		tso_frag_paddr = dma_map_single(osdev->dev,
2880 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2881 	}
2882 
2883 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2884 		__LINE__, skb_frag_len, tso_frag_len);
2885 	num_seg = tso_info->num_segs;
2886 	tso_info->num_segs = 0;
2887 	tso_info->is_tso = 1;
2888 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2889 
2890 	while (num_seg && curr_seg) {
2891 		int i = 1; /* tso fragment index */
2892 		uint8_t more_tso_frags = 1;
2893 
2894 		curr_seg->seg.num_frags = 0;
2895 		tso_info->num_segs++;
2896 		total_num_seg->num_seg.tso_cmn_num_seg++;
2897 
2898 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2899 						 &tso_cmn_info);
2900 
2901 		if (unlikely(skb_proc == 0))
2902 			return tso_info->num_segs;
2903 
2904 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2905 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2906 		/* frag len is added to ip_len in while loop below*/
2907 
2908 		curr_seg->seg.num_frags++;
2909 
2910 		while (more_tso_frags) {
2911 			if (tso_frag_len != 0) {
2912 				curr_seg->seg.tso_frags[i].vaddr =
2913 					tso_frag_vaddr;
2914 				curr_seg->seg.tso_frags[i].length =
2915 					tso_frag_len;
2916 				curr_seg->seg.total_len += tso_frag_len;
2917 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2918 				curr_seg->seg.num_frags++;
2919 				skb_proc = skb_proc - tso_frag_len;
2920 
2921 				/* increment the TCP sequence number */
2922 
2923 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2924 				curr_seg->seg.tso_frags[i].paddr =
2925 					tso_frag_paddr;
2926 			}
2927 
2928 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2929 					__func__, __LINE__,
2930 					i,
2931 					tso_frag_len,
2932 					curr_seg->seg.total_len,
2933 					curr_seg->seg.tso_frags[i].vaddr);
2934 
2935 			/* if there is no more data left in the skb */
2936 			if (!skb_proc)
2937 				return tso_info->num_segs;
2938 
2939 			/* get the next payload fragment information */
2940 			/* check if there are more fragments in this segment */
2941 			if (tso_frag_len < tso_seg_size) {
2942 				more_tso_frags = 1;
2943 				if (tso_frag_len != 0) {
2944 					tso_seg_size = tso_seg_size -
2945 						tso_frag_len;
2946 					i++;
2947 					if (curr_seg->seg.num_frags ==
2948 								FRAG_NUM_MAX) {
2949 						more_tso_frags = 0;
2950 						/*
2951 						 * reset i and the tso
2952 						 * payload size
2953 						 */
2954 						i = 1;
2955 						tso_seg_size =
2956 							skb_shinfo(skb)->
2957 								gso_size;
2958 					}
2959 				}
2960 			} else {
2961 				more_tso_frags = 0;
2962 				/* reset i and the tso payload size */
2963 				i = 1;
2964 				tso_seg_size = skb_shinfo(skb)->gso_size;
2965 			}
2966 
2967 			/* if the next fragment is contiguous */
2968 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
2969 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
2970 				skb_frag_len = skb_frag_len - tso_frag_len;
2971 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2972 
2973 			} else { /* the next fragment is not contiguous */
2974 				if (skb_shinfo(skb)->nr_frags == 0) {
2975 					qdf_print("TSO: nr_frags == 0!\n");
2976 					qdf_assert(0);
2977 					return 0;
2978 				}
2979 				if (j >= skb_shinfo(skb)->nr_frags) {
2980 					qdf_print("TSO: nr_frags %d j %d\n",
2981 						  skb_shinfo(skb)->nr_frags, j);
2982 					qdf_assert(0);
2983 					return 0;
2984 				}
2985 				frag = &skb_shinfo(skb)->frags[j];
2986 				skb_frag_len = skb_frag_size(frag);
2987 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2988 				tso_frag_vaddr = skb_frag_address_safe(frag);
2989 				j++;
2990 			}
2991 
2992 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
2993 				__func__, __LINE__, skb_frag_len, tso_frag_len,
2994 				tso_seg_size);
2995 
2996 			if (!(tso_frag_vaddr)) {
2997 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
2998 						__func__);
2999 				return 0;
3000 			}
3001 
3002 			tso_frag_paddr =
3003 					 dma_map_single(osdev->dev,
3004 						 tso_frag_vaddr,
3005 						 tso_frag_len,
3006 						 DMA_TO_DEVICE);
3007 			if (unlikely(dma_mapping_error(osdev->dev,
3008 							tso_frag_paddr))) {
3009 				qdf_print("DMA mapping error!\n");
3010 				qdf_assert(0);
3011 				return 0;
3012 			}
3013 		}
3014 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3015 				curr_seg->seg.tso_flags.tcp_seq_num);
3016 		num_seg--;
3017 		/* if TCP FIN flag was set, set it in the last segment */
3018 		if (!num_seg)
3019 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3020 
3021 		curr_seg = curr_seg->next;
3022 	}
3023 	return tso_info->num_segs;
3024 }
3025 EXPORT_SYMBOL(__qdf_nbuf_get_tso_info);
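
/*
 * Typical TX datapath call flow (sketch only; the segment element pools
 * are assumed to be pre-allocated and chained by the caller):
 *
 *	tso_info.tso_seg_list = <pre-allocated qdf_tso_seg_elem_t chain>;
 *	tso_info.tso_num_seg_list = <pre-allocated num-seg element>;
 *	tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	num_segs = __qdf_nbuf_get_tso_info(osdev, skb, &tso_info);
 */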
3026 
3027 /**
3028  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3029  *
3030  * @osdev: qdf device handle
3031  * @tso_seg: TSO segment element to be unmapped
3032  * @is_last_seg: whether this is last tso seg or not
3033  *
3034  * Return: none
3035  */
3036 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3037 			  struct qdf_tso_seg_elem_t *tso_seg,
3038 			  bool is_last_seg)
3039 {
3040 	uint32_t num_frags = 0;
3041 
3042 	if (tso_seg->seg.num_frags > 0)
3043 		num_frags = tso_seg->seg.num_frags - 1;
3044 
3045 	/*Num of frags in a tso seg cannot be less than 2 */
3046 	if (num_frags < 1) {
3047 		qdf_assert(0);
3048 		qdf_print("ERROR: num of frags in a tso segment is %d\n",
3049 				  (num_frags + 1));
3050 		return;
3051 	}
3052 
3053 	while (num_frags) {
3054 		/*Do dma unmap the tso seg except the 0th frag */
3055 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3056 			qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n",
3057 				  num_frags);
3058 			qdf_assert(0);
3059 			return;
3060 		}
3061 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3062 		dma_unmap_single(osdev->dev,
3063 				 tso_seg->seg.tso_frags[num_frags].paddr,
3064 				 tso_seg->seg.tso_frags[num_frags].length,
3065 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3066 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3067 		num_frags--;
3068 	}
3069 
3070 	if (is_last_seg) {
3071 		/*Do dma unmap for the tso seg 0th frag */
3072 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3073 			qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n");
3074 			qdf_assert(0);
3075 			return;
3076 		}
3077 		dma_unmap_single(osdev->dev,
3078 				 tso_seg->seg.tso_frags[0].paddr,
3079 				 tso_seg->seg.tso_frags[0].length,
3080 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3081 		tso_seg->seg.tso_frags[0].paddr = 0;
3082 	}
3083 }
3084 EXPORT_SYMBOL(__qdf_nbuf_unmap_tso_segment);
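
/*
 * Sketch of how a TX completion path might unmap a whole chain of
 * segments built by __qdf_nbuf_get_tso_info() (curr_seg walks the list):
 *
 *	while (curr_seg) {
 *		__qdf_nbuf_unmap_tso_segment(osdev, curr_seg,
 *					     curr_seg->next == NULL);
 *		curr_seg = curr_seg->next;
 *	}
 */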
3085 
3086 /**
3087  * __qdf_nbuf_get_tso_num_seg() - calculate the number of TSO segments
3088  * needed for a TSO nbuf
3089  * @skb: network buffer for which the TSO segment count is computed
3090  *
3091  * This function walks the skb's linear data and page fragments and
3092  * computes how many TSO segments the packet will be divided into,
3093  * based on the gso_size, without actually building the segment list.
3094  * It is the counting counterpart of __qdf_nbuf_get_tso_info().
3095  *
3096  * Return: number of TSO segments on success
3097  *         0 on failure
3098  */
3099 #ifndef BUILD_X86
3100 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3101 {
3102 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3103 	uint32_t remainder, num_segs = 0;
3104 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3105 	uint8_t frags_per_tso = 0;
3106 	uint32_t skb_frag_len = 0;
3107 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3108 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3109 	struct skb_frag_struct *frag = NULL;
3110 	int j = 0;
3111 	uint32_t temp_num_seg = 0;
3112 
3113 	/* length of the first chunk of data in the skb minus eit header*/
3114 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3115 
3116 	/* Calculate num of segs for skb's first chunk of data*/
3117 	remainder = skb_frag_len % tso_seg_size;
3118 	num_segs = skb_frag_len / tso_seg_size;
3119 	/**
3120 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3121 	 * In that case, one more tso seg is required to accommodate
3122 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3123 	 * then remaining data will be accommodated while doing the calculation
3124 	 * for nr_frags data. Hence, frags_per_tso++.
3125 	 */
3126 	if (remainder) {
3127 		if (!skb_nr_frags)
3128 			num_segs++;
3129 		else
3130 			frags_per_tso++;
3131 	}
3132 
3133 	while (skb_nr_frags) {
3134 		if (j >= skb_shinfo(skb)->nr_frags) {
3135 			qdf_print("TSO: nr_frags %d j %d\n",
3136 			skb_shinfo(skb)->nr_frags, j);
3137 			qdf_assert(0);
3138 			return 0;
3139 		}
3140 		/**
3141 		 * Calculate the number of tso seg for nr_frags data:
3142 		 * Get the length of each frag in skb_frag_len, add to
3143 		 * remainder.Get the number of segments by dividing it to
3144 		 * tso_seg_size and calculate the new remainder.
3145 		 * Decrement the nr_frags value and keep
3146 		 * looping all the skb_fragments.
3147 		 */
3148 		frag = &skb_shinfo(skb)->frags[j];
3149 		skb_frag_len = skb_frag_size(frag);
3150 		temp_num_seg = num_segs;
3151 		remainder += skb_frag_len;
3152 		num_segs += remainder / tso_seg_size;
3153 		remainder = remainder % tso_seg_size;
3154 		skb_nr_frags--;
3155 		if (remainder) {
3156 			if (num_segs > temp_num_seg)
3157 				frags_per_tso = 0;
3158 			/**
3159 			 * increment the tso per frags whenever remainder is
3160 			 * positive. If frags_per_tso reaches the (max-1),
3161 			 * [First frags always have EIT header, therefore max-1]
3162 			 * increment the num_segs as no more data can be
3163 			 * accommodated in the curr tso seg. Reset the remainder
3164 			 * and frags per tso and keep looping.
3165 			 */
3166 			frags_per_tso++;
3167 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3168 				num_segs++;
3169 				frags_per_tso = 0;
3170 				remainder = 0;
3171 			}
3172 			/**
3173 			 * If this is the last skb frag and still remainder is
3174 			 * non-zero(frags_per_tso is not reached to the max-1)
3175 			 * then increment the num_segs to take care of the
3176 			 * remaining length.
3177 			 */
3178 			if (!skb_nr_frags && remainder) {
3179 				num_segs++;
3180 				frags_per_tso = 0;
3181 			}
3182 		} else {
3183 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3184 			frags_per_tso = 0;
3185 		}
3186 		j++;
3187 	}
3188 
3189 	return num_segs;
3190 }
3191 #else
3192 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3193 {
3194 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3195 	struct skb_frag_struct *frag = NULL;
3196 
3197 	/*
3198 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3199 	 * region which cannot be accessed by Target
3200 	 */
3201 	if (virt_to_phys(skb->data) < 0x50000040) {
3202 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3203 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3204 				virt_to_phys(skb->data));
3205 		goto fail;
3206 
3207 	}
3208 
3209 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3210 		frag = &skb_shinfo(skb)->frags[i];
3211 
3212 		if (!frag)
3213 			goto fail;
3214 
3215 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3216 			goto fail;
3217 	}
3218 
3219 
3220 	gso_size = skb_shinfo(skb)->gso_size;
3221 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3222 			+ tcp_hdrlen(skb));
3223 	while (tmp_len) {
3224 		num_segs++;
3225 		if (tmp_len > gso_size)
3226 			tmp_len -= gso_size;
3227 		else
3228 			break;
3229 	}
3230 
3231 	return num_segs;
3232 
3233 	/*
3234 	 * Do not free this frame, just do socket level accounting
3235 	 * so that this is not reused.
3236 	 */
3237 fail:
3238 	if (skb->sk)
3239 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3240 
3241 	return 0;
3242 }
3243 #endif
3244 EXPORT_SYMBOL(__qdf_nbuf_get_tso_num_seg);
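
/*
 * Worked example (illustrative numbers): for a linear skb carrying 4000
 * bytes of TCP payload after the EIT header, with gso_size == 1460 and
 * nr_frags == 0, either implementation above yields 4000 / 1460 = 2 full
 * segments plus one more for the 1080-byte remainder, i.e. 3 segments.
 */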
3245 
3246 #endif /* FEATURE_TSO */
3247 
3248 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3249 {
3250 	qdf_nbuf_users_inc(&skb->users);
3251 	return skb;
3252 }
3253 EXPORT_SYMBOL(__qdf_nbuf_inc_users);
3254 
3255 int __qdf_nbuf_get_users(struct sk_buff *skb)
3256 {
3257 	return qdf_nbuf_users_read(&skb->users);
3258 }
3259 EXPORT_SYMBOL(__qdf_nbuf_get_users);
3260 
3261 /**
3262  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3263  * @skb: sk_buff handle
3264  *
3265  * Return: none
3266  */
3267 
3268 void __qdf_nbuf_ref(struct sk_buff *skb)
3269 {
3270 	skb_get(skb);
3271 }
3272 EXPORT_SYMBOL(__qdf_nbuf_ref);
3273 
3274 /**
3275  * __qdf_nbuf_shared() - Check whether the buffer is shared
3276  *  @skb: sk_buff buffer
3277  *
3278  *  Return: true if more than one person has a reference to this buffer.
3279  */
3280 int __qdf_nbuf_shared(struct sk_buff *skb)
3281 {
3282 	return skb_shared(skb);
3283 }
3284 EXPORT_SYMBOL(__qdf_nbuf_shared);
3285 
3286 /**
3287  * __qdf_nbuf_dmamap_create() - create a DMA map.
3288  * @osdev: qdf device handle
3289  * @dmap: dma map handle
3290  *
3291  * This can later be used to map networking buffers. They:
3292  * - need space in adf_drv's software descriptor
3293  * - are typically created during adf_drv_create
3294  * - need to be created before any API(qdf_nbuf_map) that uses them
3295  *
3296  * Return: QDF STATUS
3297  */
3298 QDF_STATUS
3299 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3300 {
3301 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3302 	/*
3303 	 * If the driver advertises SG capability, it must be handled here;
3304 	 * bounce buffers, if present, must also be handled.
3305 	 */
3306 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3307 	if (!(*dmap))
3308 		error = QDF_STATUS_E_NOMEM;
3309 
3310 	return error;
3311 }
3312 EXPORT_SYMBOL(__qdf_nbuf_dmamap_create);
3313 /**
3314  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3315  * @osdev: qdf device handle
3316  * @dmap: dma map handle
3317  *
3318  * Return: none
3319  */
3320 void
3321 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3322 {
3323 	kfree(dmap);
3324 }
3325 EXPORT_SYMBOL(__qdf_nbuf_dmamap_destroy);
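
/*
 * Lifecycle sketch for the DMA map handle (error handling abbreviated):
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return;
 *	... map/unmap network buffers using dmap ...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */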
3326 
3327 /**
3328  * __qdf_nbuf_map_nbytes_single() - map nbytes
3329  * @osdev: os device
3330  * @buf: buffer
3331  * @dir: direction
3332  * @nbytes: number of bytes
3333  *
3334  * Return: QDF_STATUS
3335  */
3336 #ifdef A_SIMOS_DEVHOST
3337 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3338 		qdf_device_t osdev, struct sk_buff *buf,
3339 		 qdf_dma_dir_t dir, int nbytes)
3340 {
3341 	qdf_dma_addr_t paddr;
3342 
3343 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3344 	return QDF_STATUS_SUCCESS;
3345 }
3346 EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
3347 #else
3348 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3349 		qdf_device_t osdev, struct sk_buff *buf,
3350 		 qdf_dma_dir_t dir, int nbytes)
3351 {
3352 	qdf_dma_addr_t paddr;
3353 
3354 	/* assume that the OS only provides a single fragment */
3355 	QDF_NBUF_CB_PADDR(buf) = paddr =
3356 		dma_map_single(osdev->dev, buf->data,
3357 			nbytes, __qdf_dma_dir_to_os(dir));
3358 	return dma_mapping_error(osdev->dev, paddr) ?
3359 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3360 }
3361 EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
3362 #endif
3363 /**
3364  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3365  * @osdev: os device
3366  * @buf: buffer
3367  * @dir: direction
3368  * @nbytes: number of bytes
3369  *
3370  * Return: none
3371  */
3372 #if defined(A_SIMOS_DEVHOST)
3373 void
3374 __qdf_nbuf_unmap_nbytes_single(
3375 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3376 {
3377 }
3378 EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
3379 
3380 #else
3381 void
3382 __qdf_nbuf_unmap_nbytes_single(
3383 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3384 {
3385 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3386 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3387 		return;
3388 	}
3389 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3390 			nbytes, __qdf_dma_dir_to_os(dir));
3391 }
3392 EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
3393 #endif
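
/*
 * Pairing sketch for the single-fragment helpers above; after a
 * successful map, QDF_NBUF_CB_PADDR(skb) holds the bus address that can
 * be handed to the target:
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 nbytes) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE, nbytes);
 */
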
3394 /**
3395  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3396  * @osdev: os device
3397  * @skb: skb handle
3398  * @dir: dma direction
3399  * @nbytes: number of bytes to be mapped
3400  *
3401  * Return: QDF_STATUS
3402  */
3403 #ifdef QDF_OS_DEBUG
3404 QDF_STATUS
3405 __qdf_nbuf_map_nbytes(
3406 	qdf_device_t osdev,
3407 	struct sk_buff *skb,
3408 	qdf_dma_dir_t dir,
3409 	int nbytes)
3410 {
3411 	struct skb_shared_info  *sh = skb_shinfo(skb);
3412 
3413 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3414 
3415 	/*
3416 	 * Assume there's only a single fragment.
3417 	 * To support multiple fragments, it would be necessary to change
3418 	 * adf_nbuf_t to be a separate object that stores meta-info
3419 	 * (including the bus address for each fragment) and a pointer
3420 	 * to the underlying sk_buff.
3421 	 */
3422 	qdf_assert(sh->nr_frags == 0);
3423 
3424 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3425 }
3426 EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
3427 #else
3428 QDF_STATUS
3429 __qdf_nbuf_map_nbytes(
3430 	qdf_device_t osdev,
3431 	struct sk_buff *skb,
3432 	qdf_dma_dir_t dir,
3433 	int nbytes)
3434 {
3435 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3436 }
3437 EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
3438 #endif
3439 /**
3440  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3441  * @osdev: OS device
3442  * @skb: skb handle
3443  * @dir: direction
3444  * @nbytes: number of bytes
3445  *
3446  * Return: none
3447  */
3448 void
3449 __qdf_nbuf_unmap_nbytes(
3450 	qdf_device_t osdev,
3451 	struct sk_buff *skb,
3452 	qdf_dma_dir_t dir,
3453 	int nbytes)
3454 {
3455 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3456 
3457 	/*
3458 	 * Assume there's a single fragment.
3459 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3460 	 */
3461 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3462 }
3463 EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes);
3464 
3465 /**
3466  * __qdf_nbuf_dma_map_info() - return the dma map info
3467  * @bmap: dma map
3468  * @sg: dma map info
3469  *
3470  * Return: none
3471  */
3472 void
3473 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3474 {
3475 	qdf_assert(bmap->mapped);
3476 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3477 
3478 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3479 			sizeof(struct __qdf_segment));
3480 	sg->nsegs = bmap->nsegs;
3481 }
3482 EXPORT_SYMBOL(__qdf_nbuf_dma_map_info);
3483 /**
3484  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3485  *			specified by the index
3486  * @skb: sk buff
3487  * @sg: scatter/gather list of all the frags
3488  *
3489  * Return: none
3490  */
3491 #if defined(__QDF_SUPPORT_FRAG_MEM)
3492 void
3493 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3494 {
3495 	struct skb_shared_info *sh = skb_shinfo(skb);
3496 	int i;
3497 
3498 	qdf_assert(skb != NULL);
3499 	sg->sg_segs[0].vaddr = skb->data;
3500 	sg->sg_segs[0].len   = skb->len;
3501 	sg->nsegs            = 1;
3502 	for (i = 1; i <= sh->nr_frags; i++) {
3503 		skb_frag_t *f = &sh->frags[i - 1];
3504 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
3505 					f->page_offset);
3506 		sg->sg_segs[i].len = f->size;
3507 		qdf_assert(i < QDF_MAX_SGLIST);
3508 	}
3509 	/* one linear segment plus sh->nr_frags page-fragment segments */
3510 	sg->nsegs += sh->nr_frags;
3511 }
3512 EXPORT_SYMBOL(__qdf_nbuf_frag_info);
3513 #else
3514 #ifdef QDF_OS_DEBUG
3515 void
3516 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3517 {
3518 
3519 	struct skb_shared_info  *sh = skb_shinfo(skb);
3520 
3521 	qdf_assert(skb != NULL);
3522 	sg->sg_segs[0].vaddr = skb->data;
3523 	sg->sg_segs[0].len   = skb->len;
3524 	sg->nsegs            = 1;
3525 
3526 	qdf_assert(sh->nr_frags == 0);
3527 }
3528 EXPORT_SYMBOL(__qdf_nbuf_frag_info);
3529 #else
3530 void
3531 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3532 {
3533 	sg->sg_segs[0].vaddr = skb->data;
3534 	sg->sg_segs[0].len   = skb->len;
3535 	sg->nsegs            = 1;
3536 }
3537 EXPORT_SYMBOL(__qdf_nbuf_frag_info);
3538 #endif
3539 #endif
3540 /**
3541  * __qdf_nbuf_get_frag_size() - get frag size
3542  * @nbuf: sk buffer
3543  * @cur_frag: current frag
3544  *
3545  * Return: frag size
3546  */
3547 uint32_t
3548 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3549 {
3550 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3551 	const skb_frag_t *frag = sh->frags + cur_frag;
3552 
3553 	return skb_frag_size(frag);
3554 }
3555 EXPORT_SYMBOL(__qdf_nbuf_get_frag_size);
3556 
3557 /**
3558  * __qdf_nbuf_frag_map() - dma map frag
3559  * @osdev: os device
3560  * @nbuf: sk buff
3561  * @offset: offset
3562  * @dir: direction
3563  * @cur_frag: current fragment
3564  *
3565  * Return: QDF status
3566  */
3567 #ifdef A_SIMOS_DEVHOST
3568 QDF_STATUS __qdf_nbuf_frag_map(
3569 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3570 	int offset, qdf_dma_dir_t dir, int cur_frag)
3571 {
3572 	int32_t paddr, frag_len;
3573 
3574 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3575 	return QDF_STATUS_SUCCESS;
3576 }
3577 EXPORT_SYMBOL(__qdf_nbuf_frag_map);
3578 #else
3579 QDF_STATUS __qdf_nbuf_frag_map(
3580 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3581 	int offset, qdf_dma_dir_t dir, int cur_frag)
3582 {
3583 	dma_addr_t paddr, frag_len;
3584 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3585 	const skb_frag_t *frag = sh->frags + cur_frag;
3586 
3587 	frag_len = skb_frag_size(frag);
3588 
3589 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3590 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3591 					__qdf_dma_dir_to_os(dir));
3592 	return dma_mapping_error(osdev->dev, paddr) ?
3593 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3594 }
3595 EXPORT_SYMBOL(__qdf_nbuf_frag_map);
3596 #endif
3597 /**
3598  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3599  * @dmap: dma map
3600  * @cb: callback
3601  * @arg: argument
3602  *
3603  * Return: none
3604  */
3605 void
3606 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3607 {
3608 	return;
3609 }
3610 EXPORT_SYMBOL(__qdf_nbuf_dmamap_set_cb);
3611 
3612 
3613 /**
3614  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3615  * @osdev: os device
3616  * @buf: sk buff
3617  * @dir: direction
3618  *
3619  * Return: none
3620  */
3621 #if defined(A_SIMOS_DEVHOST)
3622 static void __qdf_nbuf_sync_single_for_cpu(
3623 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3624 {
3625 	return;
3626 }
3627 #else
3628 static void __qdf_nbuf_sync_single_for_cpu(
3629 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3630 {
3631 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3632 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3633 		return;
3634 	}
3635 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3636 		skb_end_offset(buf) - skb_headroom(buf),
3637 		__qdf_dma_dir_to_os(dir));
3638 }
3639 #endif
3640 /**
3641  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3642  * @osdev: os device
3643  * @skb: sk buff
3644  * @dir: direction
3645  *
3646  * Return: none
3647  */
3648 void
3649 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3650 	struct sk_buff *skb, qdf_dma_dir_t dir)
3651 {
3652 	qdf_assert(
3653 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3654 
3655 	/*
3656 	 * Assume there's a single fragment.
3657 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3658 	 */
3659 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3660 }
3661 EXPORT_SYMBOL(__qdf_nbuf_sync_for_cpu);
3662 
3663 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3664 /**
3665  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3666  * @rx_status: Pointer to rx_status.
3667  * @rtap_buf: Buf to which VHT info has to be updated.
3668  * @rtap_len: Current length of radiotap buffer
3669  *
3670  * Return: Length of radiotap after VHT flags updated.
3671  */
3672 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3673 					struct mon_rx_status *rx_status,
3674 					int8_t *rtap_buf,
3675 					uint32_t rtap_len)
3676 {
3677 	uint16_t vht_flags = 0;
3678 
3679 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3680 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3681 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3682 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3683 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3684 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3685 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3686 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3687 	rtap_len += 2;
3688 
3689 	rtap_buf[rtap_len] |=
3690 		(rx_status->is_stbc ?
3691 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3692 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3693 		(rx_status->ldpc ?
3694 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3695 		(rx_status->beamformed ?
3696 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3697 	rtap_len += 1;
3698 	switch (rx_status->vht_flag_values2) {
3699 	case IEEE80211_RADIOTAP_VHT_BW_20:
3700 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3701 		break;
3702 	case IEEE80211_RADIOTAP_VHT_BW_40:
3703 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3704 		break;
3705 	case IEEE80211_RADIOTAP_VHT_BW_80:
3706 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3707 		break;
3708 	case IEEE80211_RADIOTAP_VHT_BW_160:
3709 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3710 		break;
3711 	}
3712 	rtap_len += 1;
3713 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3714 	rtap_len += 1;
3715 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3716 	rtap_len += 1;
3717 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3718 	rtap_len += 1;
3719 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3720 	rtap_len += 1;
3721 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3722 	rtap_len += 1;
3723 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3724 	rtap_len += 1;
3725 	put_unaligned_le16(rx_status->vht_flag_values6,
3726 			   &rtap_buf[rtap_len]);
3727 	rtap_len += 2;
3728 
3729 	return rtap_len;
3730 }
3731 
3732 /**
3733  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3734  * @rx_status: Pointer to rx_status.
3735  * @rtap_buf: buffer to which radiotap has to be updated
3736  * @rtap_len: radiotap length
3737  *
3738  * API update high-efficiency (11ax) fields in the radiotap header
3739  * This API updates high-efficiency (11ax) fields in the radiotap header
3740  * Return: length of rtap_len updated.
3741  */
3742 static unsigned int
3743 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3744 				     int8_t *rtap_buf, uint32_t rtap_len)
3745 {
3746 	/*
3747 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3748 	 * Enable all "known" HE radiotap flags for now
3749 	 */
3750 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3751 	rtap_len += 2;
3752 
3753 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3754 	rtap_len += 2;
3755 
3756 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3757 	rtap_len += 2;
3758 
3759 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3760 	rtap_len += 2;
3761 
3762 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3763 	rtap_len += 2;
3764 
3765 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3766 	rtap_len += 2;
3767 
3768 	return rtap_len;
3769 }
3770 
3771 
3772 /**
3773  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3774  * @rx_status: Pointer to rx_status.
3775  * @rtap_buf: buffer to which radiotap has to be updated
3776  * @rtap_len: radiotap length
3777  *
3778  * This API updates HE-MU fields in the radiotap header
3779  *
3780  * Return: length of rtap_len updated.
3781  */
3782 static unsigned int
3783 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3784 				     int8_t *rtap_buf, uint32_t rtap_len)
3785 {
3786 	/*
3787 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3788 	 * Enable all "known" he-mu radiotap flags for now
3789 	 */
3790 	if (rx_status->he_sig_b_common_known &
3791 			QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU0) {
3792 		rtap_buf[rtap_len] = rx_status->he_RU[0];
3793 		rtap_len += 1;
3794 	}
3795 
3796 	if (rx_status->he_sig_b_common_known &
3797 			QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU1) {
3798 		rtap_buf[rtap_len] = rx_status->he_RU[1];
3799 		rtap_len += 1;
3800 	}
3801 	if (rx_status->he_sig_b_common_known &
3802 			QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU2) {
3803 		rtap_buf[rtap_len] = rx_status->he_RU[2];
3804 		rtap_len += 1;
3805 	}
3806 	if (rx_status->he_sig_b_common_known &
3807 			QDF_MON_STATUS_HE_SIG_B_COMMON_KNOWN_RU3) {
3808 		rtap_buf[rtap_len] = rx_status->he_RU[3];
3809 		rtap_len += 1;
3810 	}
3811 
3812 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3813 	rtap_len += 2;
3814 
3815 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3816 	rtap_len += 2;
3817 
3818 	return rtap_len;
3819 }
3820 
3821 /**
3822  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3823  * @rx_status: Pointer to rx_status.
3824  * @rtap_buf: buffer to which radiotap has to be updated
3825  * @rtap_len: radiotap length
3826  *
3827  * This API updates HE-MU-other fields in the radiotap header
3828  *
3829  * Return: length of rtap_len updated.
3830  */
3831 static unsigned int
3832 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3833 				     int8_t *rtap_buf, uint32_t rtap_len)
3834 {
3835 	/*
3836 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3837 	 * Enable all "known" he-mu-other radiotap flags for now
3838 	 */
3839 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3840 	rtap_len += 2;
3841 
3842 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3843 	rtap_len += 2;
3844 
3845 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3846 	rtap_len += 1;
3847 
3848 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3849 	rtap_len += 1;
3850 
3870 	return rtap_len;
3871 }
3872 
3873 #define NORMALIZED_TO_NOISE_FLOOR (-96)
3874 
3875 /* This is the length reserved for the radiotap header: the mandatory
3876  * struct ieee80211_radiotap_header plus room for the radiotap data.
3877  * RADIOTAP_HEADER_LEN cannot be more than the available headroom_sz.
3878  * The radiotap data we currently populate is less than 100 bytes;
3879  * increase this when more radiotap elements are added.
3880  */
3881 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + 100)
3882 
3883 #define IEEE80211_RADIOTAP_HE 23
3884 #define IEEE80211_RADIOTAP_HE_MU	24
3885 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3886 
3887 /**
3888  * radiotap_num_to_freq() - Get frequency from chan number
3889  * @chan_num: Input channel number
3890  *
3891  * Return: Channel frequency in MHz
3892  */
3893 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3894 {
3895 	if (chan_num == CHANNEL_NUM_14)
3896 		return CHANNEL_FREQ_2484;
3897 	if (chan_num < CHANNEL_NUM_14)
3898 		return CHANNEL_FREQ_2407 +
3899 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3900 
3901 	if (chan_num < CHANNEL_NUM_27)
3902 		return CHANNEL_FREQ_2512 +
3903 			((chan_num - CHANNEL_NUM_15) *
3904 			 FREQ_MULTIPLIER_CONST_20MHZ);
3905 
3906 	if (chan_num > CHANNEL_NUM_182 &&
3907 			chan_num < CHANNEL_NUM_197)
3908 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3909 			CHANNEL_FREQ_4000);
3910 
3911 	return CHANNEL_FREQ_5000 +
3912 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3913 }
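
/*
 * Worked examples of the mapping above (derived from the formulas in this
 * function): 2.4 GHz channel 6 -> 2407 + 6 * 5 = 2437 MHz, channel 14 ->
 * 2484 MHz, and 5 GHz channel 36 -> 5000 + 36 * 5 = 5180 MHz.
 */
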
3914 /**
3915  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
3916  * @rx_status: Pointer to rx_status.
3917  * @nbuf:      nbuf pointer to which radiotap has to be updated
3918  * @headroom_sz: Available headroom size.
3919  *
3920  * Return: length of rtap_len updated.
3921  */
3922 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
3923 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
3924 {
3925 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
3926 	struct ieee80211_radiotap_header *rthdr =
3927 		(struct ieee80211_radiotap_header *)rtap_buf;
3928 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
3929 	uint32_t rtap_len = rtap_hdr_len;
3930 
3931 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
3932 	rthdr->it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
3933 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
3934 	rtap_len += 8;
3935 
3936 	/* IEEE80211_RADIOTAP_FLAGS u8 */
3937 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS);
3938 	rtap_buf[rtap_len] = rx_status->rtap_flags;
3939 	rtap_len += 1;
3940 
3941 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
3942 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
3943 	    !rx_status->he_flags) {
3944 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
3945 		rtap_buf[rtap_len] = rx_status->rate;
3946 	} else
3947 		rtap_buf[rtap_len] = 0;
3948 	rtap_len += 1;
3949 
3950 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
3951 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
3952 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
3953 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
3954 	rtap_len += 2;
3955 	/* Channel flags. */
3956 	if (rx_status->chan_num > CHANNEL_NUM_35)
3957 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
3958 	else
3959 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
3960 	if (rx_status->cck_flag)
3961 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
3962 	if (rx_status->ofdm_flag)
3963 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
3964 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
3965 	rtap_len += 2;
3966 
3967 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
3968 	 *					(dBm)
3969 	 */
3970 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
3971 	/*
3972 	 * rssi_comb is int dB, need to convert it to dBm.
3973 	 * normalize value to noise floor of -96 dBm
3974 	 */
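	/*
	 * Worked example (illustrative): an rssi_comb of 40 dB above the
	 * noise floor is reported as 40 + (-96) = -56 dBm.
	 */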
3975 	rtap_buf[rtap_len] = rx_status->rssi_comb +
3976 		NORMALIZED_TO_NOISE_FLOOR;
3977 	rtap_len += 1;
3978 
3979 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
3980 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA);
3981 	rtap_buf[rtap_len] = rx_status->nr_ant;
3982 	rtap_len += 1;
3983 
3984 	if (rx_status->ht_flags) {
3985 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
3986 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
3987 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
3988 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
3989 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
3990 		rtap_len += 1;
3991 
3992 		if (rx_status->sgi)
3993 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
3994 		if (rx_status->bw)
3995 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
3996 		else
3997 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
3998 		rtap_len += 1;
3999 
4000 		rtap_buf[rtap_len] = rx_status->mcs;
4001 		rtap_len += 1;
4002 	}
4003 
4004 	if (rx_status->vht_flags) {
4005 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4006 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
4007 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4008 							      rtap_buf,
4009 							      rtap_len);
4010 	}
4011 
4012 	if (rx_status->he_flags) {
4013 		/* IEEE80211_RADIOTAP_HE */
4014 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
4015 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4016 								rtap_buf,
4017 								rtap_len);
4018 	}
4019 
4020 	if (rx_status->he_mu_flags) {
4021 		/* IEEE80211_RADIOTAP_HE-MU */
4022 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
4023 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4024 								rtap_buf,
4025 								rtap_len);
4026 	}
4027 
4028 	if (rx_status->he_mu_other_flags) {
4029 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4030 		rthdr->it_present |=
4031 			cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4032 		rtap_len =
4033 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4034 								rtap_buf,
4035 								rtap_len);
4036 	}
4037 
4038 	rthdr->it_len = cpu_to_le16(rtap_len);
4039 
4040 	if (headroom_sz < rtap_len) {
4041 		qdf_print("ERROR: not enough space to update radiotap\n");
4042 		return 0;
4043 	}
4044 	qdf_nbuf_push_head(nbuf, rtap_len);
4045 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4046 	return rtap_len;
4047 }
4048 #else
4049 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4050 					struct mon_rx_status *rx_status,
4051 					int8_t *rtap_buf,
4052 					uint32_t rtap_len)
4053 {
4054 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4055 	return 0;
4056 }
4057 
4058 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4059 				      int8_t *rtap_buf, uint32_t rtap_len)
4060 {
4061 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4062 	return 0;
4063 }
4064 
4065 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4066 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4067 {
4068 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4069 	return 0;
4070 }
4071 #endif
4072 qdf_export_symbol(qdf_nbuf_update_radiotap);
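
/*
 * Usage sketch (illustrative only, not part of this driver): a monitor-mode
 * rx path would typically fill a struct mon_rx_status, check the available
 * headroom and then prepend the radiotap header before handing the frame to
 * the monitor interface. The helper names mon_deliver_radiotap() and
 * mon_osif_deliver() below are hypothetical; qdf_nbuf_update_radiotap()
 * returns 0 when the headroom is too small, in which case the nbuf is left
 * untouched.
 *
 *	static void mon_deliver_radiotap(qdf_nbuf_t nbuf,
 *					 struct mon_rx_status *rx_status)
 *	{
 *		uint32_t headroom = qdf_nbuf_headroom(nbuf);
 *
 *		if (qdf_nbuf_update_radiotap(rx_status, nbuf, headroom))
 *			mon_osif_deliver(nbuf);
 *	}
 */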
4073 
4074 /**
4075  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4076  * @cb_func_ptr: function pointer to the nbuf free callback
4077  *
4078  * This function registers a callback function for nbuf free.
4079  *
4080  * Return: none
4081  */
4082 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4083 {
4084 	nbuf_free_cb = cb_func_ptr;
4085 }
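
/*
 * Example (illustrative): a platform layer could hook its own free handler
 * during driver load, assuming a callback with the qdf_nbuf_free_t
 * signature. The callback name below is hypothetical.
 *
 *	__qdf_nbuf_reg_free_cb(my_platform_nbuf_free_cb);
 */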
4086 
4087 /**
4088  * qdf_nbuf_classify_pkt() - classify packet
4089  * @skb: sk buff
4090  *
4091  * Return: none
4092  */
4093 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4094 {
4095 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4096 
4097 	/* check destination mac address is broadcast/multicast */
4098 	if (is_broadcast_ether_addr((uint8_t *)eh))
4099 		QDF_NBUF_CB_SET_BCAST(skb);
4100 	else if (is_multicast_ether_addr((uint8_t *)eh))
4101 		QDF_NBUF_CB_SET_MCAST(skb);
4102 
4103 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4104 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4105 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4106 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4107 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4108 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4109 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4110 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4111 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4112 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4113 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4114 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4115 }
4116 EXPORT_SYMBOL(qdf_nbuf_classify_pkt);
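
/*
 * Example (illustrative sketch): after classification a tx path can branch
 * on the stored packet type, e.g. to give EAPOL frames special treatment.
 * The helper name below is hypothetical.
 *
 *	static bool tx_pkt_is_eapol(struct sk_buff *skb)
 *	{
 *		qdf_nbuf_classify_pkt(skb);
 *		return QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
 *	}
 */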
4117 
4118 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4119 {
4120 	qdf_nbuf_users_set(&nbuf->users, 1);
4121 	nbuf->data = nbuf->head + NET_SKB_PAD;
4122 	skb_reset_tail_pointer(nbuf);
4123 }
4124 EXPORT_SYMBOL(__qdf_nbuf_init);
4125 
4126 #ifdef WLAN_FEATURE_FASTPATH
4127 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4128 {
4129 	qdf_nbuf_users_set(&nbuf->users, 1);
4130 	nbuf->data = nbuf->head + NET_SKB_PAD;
4131 	skb_reset_tail_pointer(nbuf);
4132 }
4133 EXPORT_SYMBOL(qdf_nbuf_init_fast);
4134 #endif /* WLAN_FEATURE_FASTPATH */
4135 
4136 
4137 #ifdef QDF_NBUF_GLOBAL_COUNT
4138 #ifdef WLAN_DEBUGFS
4139 /**
4140  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4141  *
4142  * Return: void
4143  */
4144 void __qdf_nbuf_mod_init(void)
4145 {
4146 	qdf_atomic_init(&nbuf_count);
4147 	qdf_debugfs_init();
4148 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4149 }
4150 
4151 /**
4152  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4153  *
4154  * Return: void
4155  */
4156 void __qdf_nbuf_mod_exit(void)
4157 {
4158 	qdf_debugfs_exit();
4159 }
4160 
4161 #else
4162 
4163 void __qdf_nbuf_mod_init(void)
4164 {
4165 	qdf_atomic_init(&nbuf_count);
4166 }
4167 
4168 void __qdf_nbuf_mod_exit(void)
4169 {
4170 }
4171 
4172 #endif
4173 #endif
4174