xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework (QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_types.h>
32 #include <qdf_nbuf.h>
33 #include "qdf_flex_mem.h"
34 #include <qdf_mem.h>
35 #include <qdf_status.h>
36 #include <qdf_lock.h>
37 #include <qdf_trace.h>
38 #include <qdf_debugfs.h>
39 #include <net/ieee80211_radiotap.h>
40 #include <qdf_module.h>
42 #include <pld_common.h>
44 
45 #if defined(FEATURE_TSO)
46 #include <net/ipv6.h>
47 #include <linux/ipv6.h>
48 #include <linux/tcp.h>
49 #include <linux/if_vlan.h>
50 #include <linux/ip.h>
51 #endif /* FEATURE_TSO */
52 
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
54 
55 #define qdf_nbuf_users_inc atomic_inc
56 #define qdf_nbuf_users_dec atomic_dec
57 #define qdf_nbuf_users_set atomic_set
58 #define qdf_nbuf_users_read atomic_read
59 #else
60 #define qdf_nbuf_users_inc refcount_inc
61 #define qdf_nbuf_users_dec refcount_dec
62 #define qdf_nbuf_users_set refcount_set
63 #define qdf_nbuf_users_read refcount_read
64 #endif /* KERNEL_VERSION(4, 13, 0) */
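/*
 * Illustrative note (not from the original source): kernel 4.13 converted
 * skb->users from atomic_t to refcount_t, and the wrappers above keep
 * callers version-agnostic. A minimal sketch of a caller:
 *
 *	static inline int nbuf_users_example(struct sk_buff *skb)
 *	{
 *		return qdf_nbuf_users_read(&skb->users);
 *	}
 */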
65 
66 #define IEEE80211_RADIOTAP_VHT_BW_20	0
67 #define IEEE80211_RADIOTAP_VHT_BW_40	1
68 #define IEEE80211_RADIOTAP_VHT_BW_80	2
69 #define IEEE80211_RADIOTAP_VHT_BW_160	3
70 
71 #define RADIOTAP_VHT_BW_20	0
72 #define RADIOTAP_VHT_BW_40	1
73 #define RADIOTAP_VHT_BW_80	4
74 #define RADIOTAP_VHT_BW_160	11
75 
76 /* channel number to freq conversion */
77 #define CHANNEL_NUM_14 14
78 #define CHANNEL_NUM_15 15
79 #define CHANNEL_NUM_27 27
80 #define CHANNEL_NUM_35 35
81 #define CHANNEL_NUM_182 182
82 #define CHANNEL_NUM_197 197
83 #define CHANNEL_FREQ_2484 2484
84 #define CHANNEL_FREQ_2407 2407
85 #define CHANNEL_FREQ_2512 2512
86 #define CHANNEL_FREQ_5000 5000
87 #define CHANNEL_FREQ_4000 4000
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
95 #ifdef CONFIG_MCL
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 /* Packet Counter */
110 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
111 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
112 #ifdef QDF_NBUF_GLOBAL_COUNT
113 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
114 static qdf_atomic_t nbuf_count;
115 #endif
116 
117 /**
118  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
119  *
120  * Return: none
121  */
122 void qdf_nbuf_tx_desc_count_display(void)
123 {
124 	qdf_print("Current Snapshot of the Driver:\n");
125 	qdf_print("Data Packets:\n");
126 	qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
127 		nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
128 		(nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
129 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
130 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
131 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
132 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
133 		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
134 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
135 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
136 			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
137 	qdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
138 		nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
139 			nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
140 		nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
141 			 nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
142 		nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
143 			 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
144 		nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
145 	qdf_print("Mgmt Packets:\n");
146 	qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
147 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
148 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
149 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
150 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
151 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
152 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
153 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
154 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
155 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
156 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
157 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
158 			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
159 		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
160 }
161 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
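/*
 * Interpretation of the snapshot above (inferred from the arithmetic, not
 * stated in the original source): each counter accumulates packets that
 * reached that layer, so the difference between two adjacent layers, e.g.
 * nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] - nbuf_tx_data[QDF_NBUF_TX_PKT_HTC],
 * approximates how many packets are currently held between those layers.
 */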
162 
163 /**
164  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
165  * @packet_type: packet type, either mgmt or data
166  * @current_state: layer at which the packet is currently present
167  *
168  * Return: none
169  */
170 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
171 			uint8_t current_state)
172 {
173 	switch (packet_type) {
174 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
175 		nbuf_tx_mgmt[current_state]++;
176 		break;
177 	case QDF_NBUF_TX_PKT_DATA_TRACK:
178 		nbuf_tx_data[current_state]++;
179 		break;
180 	default:
181 		break;
182 	}
183 }
184 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
185 
186 /**
187  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
188  *
189  * Return: none
190  */
191 void qdf_nbuf_tx_desc_count_clear(void)
192 {
193 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
194 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
195 }
196 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
197 
198 /**
199  * qdf_nbuf_set_state() - Updates the packet state
200  * @nbuf:            network buffer
201  * @current_state: layer at which the packet currently is
202  *
203  * This function updates the packet state to the layer at which the packet
204  * currently is
205  *
206  * Return: none
207  */
208 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
209 {
210 	/*
211 	 * Only Mgmt, Data Packets are tracked. WMI messages
212 	 * such as scan commands are not tracked
213 	 */
214 	uint8_t packet_type;
215 
216 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
217 
218 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
219 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
220 		return;
221 	}
222 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
223 	qdf_nbuf_tx_desc_count_update(packet_type,
224 					current_state);
225 }
226 qdf_export_symbol(qdf_nbuf_set_state);
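/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a TX
 * path tags an nbuf for tracking, then records each layer it passes through.
 */
#if 0	/* illustrative only */
static void example_tx(qdf_nbuf_t nbuf)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
	/* ... hand off; lower layers call qdf_nbuf_set_state() again ... */
}
#endif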
227 
228 #ifdef CONFIG_MCL
229 /**
230  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
231  *
232  * This function starts the alloc fail replenish timer.
233  *
234  * Return: void
235  */
236 static void __qdf_nbuf_start_replenish_timer(void)
237 {
238 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
239 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
240 	    QDF_TIMER_STATE_RUNNING)
241 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
242 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
243 }
244 
245 /**
246  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
247  *
248  * This function stops the alloc fail replenish timer.
249  *
250  * Return: void
251  */
252 static void __qdf_nbuf_stop_replenish_timer(void)
253 {
254 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
255 		return;
256 
257 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
258 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
259 	    QDF_TIMER_STATE_RUNNING)
260 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
261 }
262 
263 /**
264  * qdf_replenish_expire_handler() - Replenish expire handler
265  *
266  * This function runs when the alloc fail replenish timer expires.
267  *
268  * Return: void
269  */
270 static void qdf_replenish_expire_handler(void *arg)
271 {
272 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
273 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
274 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
275 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
276 
277 		/* Error handling here */
278 	}
279 }
280 
281 /**
282  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
283  *
284  * This function initializes the nbuf alloc fail replenish timer.
285  *
286  * Return: void
287  */
288 void __qdf_nbuf_init_replenish_timer(void)
289 {
290 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
291 			  qdf_replenish_expire_handler, NULL);
292 }
293 
294 /**
295  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
296  *
297  * This function deinitializes the nbuf alloc fail replenish timer.
298  *
299  * Return: void
300  */
301 void __qdf_nbuf_deinit_replenish_timer(void)
302 {
303 	__qdf_nbuf_stop_replenish_timer();
304 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
305 }
306 #else
307 
308 static inline void __qdf_nbuf_start_replenish_timer(void) {}
309 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
310 #endif
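/*
 * Lifecycle sketch (assumed wiring, inferred from this file): the driver
 * calls __qdf_nbuf_init_replenish_timer() once at load and
 * __qdf_nbuf_deinit_replenish_timer() at unload. The start/stop helpers are
 * driven internally by __qdf_nbuf_alloc() on allocation failure/success, and
 * the expiry handler only logs once the fail count exceeds
 * QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD.
 */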
311 
312 /* globals do not need to be initialized to NULL/0 */
313 qdf_nbuf_trace_update_t qdf_trace_update_cb;
314 qdf_nbuf_free_t nbuf_free_cb;
315 
316 #ifdef QDF_NBUF_GLOBAL_COUNT
317 
318 /**
319  * __qdf_nbuf_count_get() - get nbuf global count
320  *
321  * Return: nbuf global count
322  */
323 int __qdf_nbuf_count_get(void)
324 {
325 	return qdf_atomic_read(&nbuf_count);
326 }
327 qdf_export_symbol(__qdf_nbuf_count_get);
328 
329 /**
330  * __qdf_nbuf_count_inc() - increment nbuf global count
331  *
332  * @nbuf: sk buff
333  *
334  * Return: void
335  */
336 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
337 {
338 	qdf_atomic_inc(&nbuf_count);
339 }
340 qdf_export_symbol(__qdf_nbuf_count_inc);
341 
342 /**
343  * __qdf_nbuf_count_dec() - decrement nbuf global count
344  *
345  * @nbuf: sk buff
346  *
347  * Return: void
348  */
349 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
350 {
351 	qdf_atomic_dec(&nbuf_count);
352 }
353 qdf_export_symbol(__qdf_nbuf_count_dec);
354 #endif
355 
356 
357 /**
358  * __qdf_nbuf_alloc() - Allocate nbuf
359  * @osdev: Device handle
360  * @size: Netbuf requested size
361  * @reserve: headroom to start with
362  * @align: Required data alignment, in bytes
363  * @prio: Priority
364  *
365  * This allocates an nbuf, aligns it if needed, and reserves some headroom at
366  * the front. Since the reserve is applied after alignment, an unaligned
367  * reserve value will leave the data pointer unaligned.
368  *
369  * Return: nbuf or %NULL if no memory
370  */
371 #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
372 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
373 			 int align, int prio)
374 {
375 	struct sk_buff *skb;
376 	unsigned long offset;
377 	uint32_t lowmem_alloc_tries = 0;
378 
379 	if (align)
380 		size += (align - 1);
381 
382 realloc:
383 	skb = dev_alloc_skb(size);
384 
385 	if (skb)
386 		goto skb_alloc;
387 
388 	skb = pld_nbuf_pre_alloc(size);
389 
390 	if (!skb) {
391 		pr_err("ERROR: NBUF alloc failed\n");
392 		return NULL;
393 	}
394 
395 skb_alloc:
396 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
397 	 * Though we are trying to reserve low memory upfront to prevent this,
398 	 * we sometimes see SKBs allocated from low memory.
399 	 */
400 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
401 		lowmem_alloc_tries++;
402 		if (lowmem_alloc_tries > 100) {
403 			qdf_print("%s failed", __func__);
404 			return NULL;
405 		} else {
406 			/* Not freeing to make sure it
407 			 * will not get allocated again
408 			 */
409 			goto realloc;
410 		}
411 	}
412 	memset(skb->cb, 0x0, sizeof(skb->cb));
413 
414 	/*
415 	 * The default is for netbuf fragments to be interpreted
416 	 * as wordstreams rather than bytestreams.
417 	 */
418 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
419 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
420 
421 	/*
422 	 * XXX:how about we reserve first then align
423 	 * Align & make sure that the tail & data are adjusted properly
424 	 */
425 
426 	if (align) {
427 		offset = ((unsigned long)skb->data) % align;
428 		if (offset)
429 			skb_reserve(skb, align - offset);
430 	}
431 
432 	/*
433 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
434 	 * pointer
435 	 */
436 	skb_reserve(skb, reserve);
437 	qdf_nbuf_count_inc(skb);
438 
439 	return skb;
440 }
441 #else
442 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
443 			 int align, int prio)
444 {
445 	struct sk_buff *skb;
446 	unsigned long offset;
447 	int flags = GFP_KERNEL;
448 
449 	if (align)
450 		size += (align - 1);
451 
452 	if (in_interrupt() || irqs_disabled() || in_atomic())
453 		flags = GFP_ATOMIC;
454 
455 	skb = __netdev_alloc_skb(NULL, size, flags);
456 
457 	if (skb)
458 		goto skb_alloc;
459 
460 	skb = pld_nbuf_pre_alloc(size);
461 
462 	if (!skb) {
463 		pr_err_ratelimited("ERROR:NBUF alloc failed, size = %zu\n",
464 				   size);
465 		__qdf_nbuf_start_replenish_timer();
466 		return NULL;
467 	} else {
468 		__qdf_nbuf_stop_replenish_timer();
469 	}
470 
471 skb_alloc:
472 	memset(skb->cb, 0x0, sizeof(skb->cb));
473 
474 	/*
475 	 * The default is for netbuf fragments to be interpreted
476 	 * as wordstreams rather than bytestreams.
477 	 */
478 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
479 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
480 
481 	/*
482 	 * XXX:how about we reserve first then align
483 	 * Align & make sure that the tail & data are adjusted properly
484 	 */
485 
486 	if (align) {
487 		offset = ((unsigned long)skb->data) % align;
488 		if (offset)
489 			skb_reserve(skb, align - offset);
490 	}
491 
492 	/*
493 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
494 	 * pointer
495 	 */
496 	skb_reserve(skb, reserve);
497 	qdf_nbuf_count_inc(skb);
498 
499 	return skb;
500 }
501 #endif
502 qdf_export_symbol(__qdf_nbuf_alloc);
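/*
 * Usage sketch (hypothetical values): allocate a 2 KB nbuf with 4-byte
 * alignment and 64 bytes of headroom. Because the reserve is applied after
 * alignment, pass an aligned reserve value to keep the data pointer aligned.
 */
#if 0	/* illustrative only */
static qdf_nbuf_t alloc_example(qdf_device_t osdev)
{
	return __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0);
}
#endif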
503 
504 /**
505  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
506  * @skb: Pointer to network buffer
507  *
508  * Return: none
509  */
510 
511 #ifdef CONFIG_MCL
512 void __qdf_nbuf_free(struct sk_buff *skb)
513 {
514 	if (pld_nbuf_pre_alloc_free(skb))
515 		return;
516 
517 	qdf_nbuf_count_dec(skb);
518 	if (nbuf_free_cb)
519 		nbuf_free_cb(skb);
520 	else
521 		dev_kfree_skb_any(skb);
522 }
523 #else
524 void __qdf_nbuf_free(struct sk_buff *skb)
525 {
526 	if (pld_nbuf_pre_alloc_free(skb))
527 		return;
528 
529 	qdf_nbuf_count_dec(skb);
530 	dev_kfree_skb_any(skb);
531 }
532 #endif
533 
534 qdf_export_symbol(__qdf_nbuf_free);
535 
536 #ifdef NBUF_MEMORY_DEBUG
537 enum qdf_nbuf_event_type {
538 	QDF_NBUF_ALLOC,
539 	QDF_NBUF_FREE,
540 	QDF_NBUF_MAP,
541 	QDF_NBUF_UNMAP,
542 };
543 
544 struct qdf_nbuf_event {
545 	qdf_nbuf_t nbuf;
546 	const char *file;
547 	uint32_t line;
548 	enum qdf_nbuf_event_type type;
549 	uint64_t timestamp;
550 };
551 
552 #define QDF_NBUF_HISTORY_SIZE 4096
553 static qdf_atomic_t qdf_nbuf_history_index;
554 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
555 
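/* Atomically advance a circular history index: qdf_atomic_inc_return() hands
 * each caller a unique ticket, the counter is pulled back down by @size once
 * it reaches @size so it does not grow unbounded, and the modulo keeps every
 * returned slot within [0, size).
 */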
556 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
557 {
558 	int32_t next = qdf_atomic_inc_return(index);
559 
560 	if (next == size)
561 		qdf_atomic_sub(size, index);
562 
563 	return next % size;
564 }
565 
566 static void
567 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *file, uint32_t line,
568 		     enum qdf_nbuf_event_type type)
569 {
570 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
571 						   QDF_NBUF_HISTORY_SIZE);
572 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
573 
574 	event->nbuf = nbuf;
575 	event->file = file;
576 	event->line = line;
577 	event->type = type;
578 	event->timestamp = qdf_get_log_timestamp();
579 }
580 
581 struct qdf_nbuf_map_metadata {
582 	struct hlist_node node;
583 	qdf_nbuf_t nbuf;
584 	const char *file;
585 	uint32_t line;
586 };
587 
588 DEFINE_QDF_FLEX_MEM_POOL(qdf_nbuf_map_pool,
589 			 sizeof(struct qdf_nbuf_map_metadata), 0);
590 #define QDF_NBUF_MAP_HT_BITS 10 /* 1024 buckets */
591 static DECLARE_HASHTABLE(qdf_nbuf_map_ht, QDF_NBUF_MAP_HT_BITS);
592 static qdf_spinlock_t qdf_nbuf_map_lock;
593 
594 static void qdf_nbuf_map_tracking_init(void)
595 {
596 	qdf_flex_mem_init(&qdf_nbuf_map_pool);
597 	hash_init(qdf_nbuf_map_ht);
598 	qdf_spinlock_create(&qdf_nbuf_map_lock);
599 }
600 
601 void qdf_nbuf_map_check_for_leaks(void)
602 {
603 	struct qdf_nbuf_map_metadata *meta;
604 	int bucket;
605 	uint32_t count = 0;
606 	bool is_empty;
607 
608 	qdf_flex_mem_release(&qdf_nbuf_map_pool);
609 
610 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
611 	is_empty = hash_empty(qdf_nbuf_map_ht);
612 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
613 
614 	if (is_empty)
615 		return;
616 
617 	qdf_err("Nbuf map without unmap events detected!");
618 	qdf_err("------------------------------------------------------------");
619 
620 	/* Hold the lock for the entire iteration for safe list/meta access. We
621 	 * are explicitly preferring the chance to watchdog on the print, over
622  * the possibility of invalid list/memory access. Since we are going to
623 	 * panic anyway, the worst case is loading up the crash dump to find out
624 	 * what was in the hash table.
625 	 */
626 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
627 	hash_for_each(qdf_nbuf_map_ht, bucket, meta, node) {
628 		count++;
629 		qdf_err("0x%pK @ %s:%u",
630 			meta->nbuf, kbasename(meta->file), meta->line);
631 	}
632 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
633 
634 	panic("%u fatal nbuf map without unmap events detected!", count);
635 }
636 
637 static void qdf_nbuf_map_tracking_deinit(void)
638 {
639 	qdf_nbuf_map_check_for_leaks();
640 	qdf_spinlock_destroy(&qdf_nbuf_map_lock);
641 	qdf_flex_mem_deinit(&qdf_nbuf_map_pool);
642 }
643 
644 static struct qdf_nbuf_map_metadata *qdf_nbuf_meta_get(qdf_nbuf_t nbuf)
645 {
646 	struct qdf_nbuf_map_metadata *meta;
647 
648 	hash_for_each_possible(qdf_nbuf_map_ht, meta, node, (size_t)nbuf) {
649 		if (meta->nbuf == nbuf)
650 			return meta;
651 	}
652 
653 	return NULL;
654 }
655 
656 static QDF_STATUS
657 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
658 {
659 	struct qdf_nbuf_map_metadata *meta;
660 
661 	QDF_BUG(nbuf);
662 	if (!nbuf) {
663 		qdf_err("Cannot map null nbuf");
664 		return QDF_STATUS_E_INVAL;
665 	}
666 
667 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
668 	meta = qdf_nbuf_meta_get(nbuf);
669 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
670 	if (meta)
671 		panic("Double nbuf map detected @ %s:%u",
672 		      kbasename(file), line);
673 
674 	meta = qdf_flex_mem_alloc(&qdf_nbuf_map_pool);
675 	if (!meta) {
676 		qdf_err("Failed to allocate nbuf map tracking metadata");
677 		return QDF_STATUS_E_NOMEM;
678 	}
679 
680 	meta->nbuf = nbuf;
681 	meta->file = file;
682 	meta->line = line;
683 
684 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
685 	hash_add(qdf_nbuf_map_ht, &meta->node, (size_t)nbuf);
686 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
687 
688 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_MAP);
689 
690 	return QDF_STATUS_SUCCESS;
691 }
692 
693 static void
694 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *file, uint32_t line)
695 {
696 	struct qdf_nbuf_map_metadata *meta;
697 
698 	QDF_BUG(nbuf);
699 	if (!nbuf) {
700 		qdf_err("Cannot unmap null nbuf");
701 		return;
702 	}
703 
704 	qdf_spin_lock_irqsave(&qdf_nbuf_map_lock);
705 	meta = qdf_nbuf_meta_get(nbuf);
706 
707 	if (!meta)
708 		panic("Double nbuf unmap or unmap without map detected @%s:%u",
709 		      kbasename(file), line);
710 
711 	hash_del(&meta->node);
712 	qdf_spin_unlock_irqrestore(&qdf_nbuf_map_lock);
713 
714 	qdf_flex_mem_free(&qdf_nbuf_map_pool, meta);
715 
716 	qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_UNMAP);
717 }
718 
719 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
720 			      qdf_nbuf_t buf,
721 			      qdf_dma_dir_t dir,
722 			      const char *file,
723 			      uint32_t line)
724 {
725 	QDF_STATUS status;
726 
727 	status = qdf_nbuf_track_map(buf, file, line);
728 	if (QDF_IS_STATUS_ERROR(status))
729 		return status;
730 
731 	status = __qdf_nbuf_map(osdev, buf, dir);
732 	if (QDF_IS_STATUS_ERROR(status))
733 		qdf_nbuf_untrack_map(buf, file, line);
734 
735 	return status;
736 }
737 
738 qdf_export_symbol(qdf_nbuf_map_debug);
739 
740 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
741 			  qdf_nbuf_t buf,
742 			  qdf_dma_dir_t dir,
743 			  const char *file,
744 			  uint32_t line)
745 {
746 	qdf_nbuf_untrack_map(buf, file, line);
747 	__qdf_nbuf_unmap_single(osdev, buf, dir);
748 }
749 
750 qdf_export_symbol(qdf_nbuf_unmap_debug);
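/*
 * Usage sketch (hypothetical caller): in NBUF_MEMORY_DEBUG builds the map
 * helpers record (nbuf, file, line), so any map without a matching unmap is
 * reported by qdf_nbuf_map_check_for_leaks() along with its call site.
 */
#if 0	/* illustrative only */
static QDF_STATUS map_example(qdf_device_t osdev, qdf_nbuf_t buf)
{
	QDF_STATUS status;

	status = qdf_nbuf_map_debug(osdev, buf, QDF_DMA_TO_DEVICE,
				    __FILE__, __LINE__);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... hardware owns the buffer here ... */

	qdf_nbuf_unmap_debug(osdev, buf, QDF_DMA_TO_DEVICE,
			     __FILE__, __LINE__);
	return QDF_STATUS_SUCCESS;
}
#endif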
751 
752 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
753 				     qdf_nbuf_t buf,
754 				     qdf_dma_dir_t dir,
755 				     const char *file,
756 				     uint32_t line)
757 {
758 	QDF_STATUS status;
759 
760 	status = qdf_nbuf_track_map(buf, file, line);
761 	if (QDF_IS_STATUS_ERROR(status))
762 		return status;
763 
764 	status = __qdf_nbuf_map_single(osdev, buf, dir);
765 	if (QDF_IS_STATUS_ERROR(status))
766 		qdf_nbuf_untrack_map(buf, file, line);
767 
768 	return status;
769 }
770 
771 qdf_export_symbol(qdf_nbuf_map_single_debug);
772 
773 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
774 				 qdf_nbuf_t buf,
775 				 qdf_dma_dir_t dir,
776 				 const char *file,
777 				 uint32_t line)
778 {
779 	qdf_nbuf_untrack_map(buf, file, line);
780 	__qdf_nbuf_unmap_single(osdev, buf, dir);
781 }
782 
783 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
784 
785 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
786 				     qdf_nbuf_t buf,
787 				     qdf_dma_dir_t dir,
788 				     int nbytes,
789 				     const char *file,
790 				     uint32_t line)
791 {
792 	QDF_STATUS status;
793 
794 	status = qdf_nbuf_track_map(buf, file, line);
795 	if (QDF_IS_STATUS_ERROR(status))
796 		return status;
797 
798 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
799 	if (QDF_IS_STATUS_ERROR(status))
800 		qdf_nbuf_untrack_map(buf, file, line);
801 
802 	return status;
803 }
804 
805 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
806 
807 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
808 				 qdf_nbuf_t buf,
809 				 qdf_dma_dir_t dir,
810 				 int nbytes,
811 				 const char *file,
812 				 uint32_t line)
813 {
814 	qdf_nbuf_untrack_map(buf, file, line);
815 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
816 }
817 
818 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
819 
820 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
821 					    qdf_nbuf_t buf,
822 					    qdf_dma_dir_t dir,
823 					    int nbytes,
824 					    const char *file,
825 					    uint32_t line)
826 {
827 	QDF_STATUS status;
828 
829 	status = qdf_nbuf_track_map(buf, file, line);
830 	if (QDF_IS_STATUS_ERROR(status))
831 		return status;
832 
833 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
834 	if (QDF_IS_STATUS_ERROR(status))
835 		qdf_nbuf_untrack_map(buf, file, line);
836 
837 	return status;
838 }
839 
840 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
841 
842 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
843 					qdf_nbuf_t buf,
844 					qdf_dma_dir_t dir,
845 					int nbytes,
846 					const char *file,
847 					uint32_t line)
848 {
849 	qdf_nbuf_untrack_map(buf, file, line);
850 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
851 }
852 
853 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
854 #endif /* NBUF_MEMORY_DEBUG */
855 
856 /**
857  * __qdf_nbuf_map() - map a buffer to local bus address space
858  * @osdev: OS device
860  * @skb: Pointer to network buffer
861  * @dir: Direction
862  *
863  * Return: QDF_STATUS
864  */
865 #ifdef QDF_OS_DEBUG
866 QDF_STATUS
867 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
868 {
869 	struct skb_shared_info *sh = skb_shinfo(skb);
870 
871 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
872 			|| (dir == QDF_DMA_FROM_DEVICE));
873 
874 	/*
875 	 * Assume there's only a single fragment.
876 	 * To support multiple fragments, it would be necessary to change
877 	 * qdf_nbuf_t to be a separate object that stores meta-info
878 	 * (including the bus address for each fragment) and a pointer
879 	 * to the underlying sk_buff.
880 	 */
881 	qdf_assert(sh->nr_frags == 0);
882 
883 	return __qdf_nbuf_map_single(osdev, skb, dir);
884 }
885 qdf_export_symbol(__qdf_nbuf_map);
886 
887 #else
888 QDF_STATUS
889 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
890 {
891 	return __qdf_nbuf_map_single(osdev, skb, dir);
892 }
893 qdf_export_symbol(__qdf_nbuf_map);
894 #endif
895 /**
896  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
897  * @osdev: OS device
898  * @skb: Pointer to network buffer
899  * @dir: dma direction
900  *
901  * Return: none
902  */
903 void
904 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
905 			qdf_dma_dir_t dir)
906 {
907 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
908 		   || (dir == QDF_DMA_FROM_DEVICE));
909 
910 	/*
911 	 * Assume there's a single fragment.
912 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
913 	 */
914 	__qdf_nbuf_unmap_single(osdev, skb, dir);
915 }
916 qdf_export_symbol(__qdf_nbuf_unmap);
917 
918 /**
919  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
920  * @osdev: OS device
921  * @buf: Pointer to network buffer
922  * @dir: Direction
923  *
924  * Return: QDF_STATUS
925  */
926 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
927 QDF_STATUS
928 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
929 {
930 	qdf_dma_addr_t paddr;
931 
932 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
933 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
934 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
935 	return QDF_STATUS_SUCCESS;
936 }
937 qdf_export_symbol(__qdf_nbuf_map_single);
938 #else
939 QDF_STATUS
940 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
941 {
942 	qdf_dma_addr_t paddr;
943 
944 	/* assume that the OS only provides a single fragment */
945 	QDF_NBUF_CB_PADDR(buf) = paddr =
946 		dma_map_single(osdev->dev, buf->data,
947 				skb_end_pointer(buf) - buf->data,
948 				__qdf_dma_dir_to_os(dir));
949 	return dma_mapping_error(osdev->dev, paddr)
950 		? QDF_STATUS_E_FAILURE
951 		: QDF_STATUS_SUCCESS;
952 }
953 qdf_export_symbol(__qdf_nbuf_map_single);
954 #endif
955 /**
956  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
957  * @osdev: OS device
958  * @buf: Pointer to network buffer
959  * @dir: Direction
960  *
961  * Return: none
962  */
963 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
964 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
965 				qdf_dma_dir_t dir)
966 {
967 }
968 #else
969 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
970 					qdf_dma_dir_t dir)
971 {
972 	if (QDF_NBUF_CB_PADDR(buf))
973 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
974 			skb_end_pointer(buf) - buf->data,
975 			__qdf_dma_dir_to_os(dir));
976 }
977 #endif
978 qdf_export_symbol(__qdf_nbuf_unmap_single);
979 
980 /**
981  * __qdf_nbuf_set_rx_cksum() - set rx checksum
982  * @skb: Pointer to network buffer
983  * @cksum: Pointer to checksum value
984  *
985  * Return: QDF_STATUS
986  */
987 QDF_STATUS
988 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
989 {
990 	switch (cksum->l4_result) {
991 	case QDF_NBUF_RX_CKSUM_NONE:
992 		skb->ip_summed = CHECKSUM_NONE;
993 		break;
994 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
995 		skb->ip_summed = CHECKSUM_UNNECESSARY;
996 		break;
997 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
998 		skb->ip_summed = CHECKSUM_PARTIAL;
999 		skb->csum = cksum->val;
1000 		break;
1001 	default:
1002 		pr_err("Unknown checksum type\n");
1003 		qdf_assert(0);
1004 		return QDF_STATUS_E_NOSUPPORT;
1005 	}
1006 	return QDF_STATUS_SUCCESS;
1007 }
1008 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
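/*
 * Usage sketch (hypothetical RX completion path): report a hardware-verified
 * L4 checksum so the network stack skips software verification.
 */
#if 0	/* illustrative only */
static void rx_cksum_example(struct sk_buff *skb)
{
	qdf_nbuf_rx_cksum_t cksum = {
		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
	};

	__qdf_nbuf_set_rx_cksum(skb, &cksum);
}
#endif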
1009 
1010 /**
1011  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1012  * @skb: Pointer to network buffer
1013  *
1014  * Return: TX checksum value
1015  */
1016 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1017 {
1018 	switch (skb->ip_summed) {
1019 	case CHECKSUM_NONE:
1020 		return QDF_NBUF_TX_CKSUM_NONE;
1021 	case CHECKSUM_PARTIAL:
1022 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1023 	case CHECKSUM_COMPLETE:
1024 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1025 	default:
1026 		return QDF_NBUF_TX_CKSUM_NONE;
1027 	}
1028 }
1029 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1030 
1031 /**
1032  * __qdf_nbuf_get_tid() - get tid
1033  * @skb: Pointer to network buffer
1034  *
1035  * Return: tid
1036  */
1037 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1038 {
1039 	return skb->priority;
1040 }
1041 qdf_export_symbol(__qdf_nbuf_get_tid);
1042 
1043 /**
1044  * __qdf_nbuf_set_tid() - set tid
1045  * @skb: Pointer to network buffer
 * @tid: TID value to set
1046  *
1047  * Return: none
1048  */
1049 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1050 {
1051 	skb->priority = tid;
1052 }
1053 qdf_export_symbol(__qdf_nbuf_set_tid);
1054 
1055 /**
1056  * __qdf_nbuf_get_exemption_type() - get exemption type
1057  * @skb: Pointer to network buffer
1058  *
1059  * Return: exemption type
1060  */
1061 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1062 {
1063 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1064 }
1065 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1066 
1067 /**
1068  * __qdf_nbuf_reg_trace_cb() - register trace callback
1069  * @cb_func_ptr: Pointer to trace callback function
1070  *
1071  * Return: none
1072  */
1073 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1074 {
1075 	qdf_trace_update_cb = cb_func_ptr;
1076 }
1077 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1078 
1079 /**
1080  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1081  *              of DHCP packet.
1082  * @data: Pointer to DHCP packet data buffer
1083  *
1084  * This func. returns the subtype of DHCP packet.
1085  *
1086  * Return: subtype of the DHCP packet.
1087  */
1088 enum qdf_proto_subtype
1089 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1090 {
1091 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1092 
1093 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1094 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1095 					QDF_DHCP_OPTION53_LENGTH)) {
1096 
1097 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1098 		case QDF_DHCP_DISCOVER:
1099 			subtype = QDF_PROTO_DHCP_DISCOVER;
1100 			break;
1101 		case QDF_DHCP_REQUEST:
1102 			subtype = QDF_PROTO_DHCP_REQUEST;
1103 			break;
1104 		case QDF_DHCP_OFFER:
1105 			subtype = QDF_PROTO_DHCP_OFFER;
1106 			break;
1107 		case QDF_DHCP_ACK:
1108 			subtype = QDF_PROTO_DHCP_ACK;
1109 			break;
1110 		case QDF_DHCP_NAK:
1111 			subtype = QDF_PROTO_DHCP_NACK;
1112 			break;
1113 		case QDF_DHCP_RELEASE:
1114 			subtype = QDF_PROTO_DHCP_RELEASE;
1115 			break;
1116 		case QDF_DHCP_INFORM:
1117 			subtype = QDF_PROTO_DHCP_INFORM;
1118 			break;
1119 		case QDF_DHCP_DECLINE:
1120 			subtype = QDF_PROTO_DHCP_DECLINE;
1121 			break;
1122 		default:
1123 			break;
1124 		}
1125 	}
1126 
1127 	return subtype;
1128 }
1129 
1130 /**
1131  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1132  *            of EAPOL packet.
1133  * @data: Pointer to EAPOL packet data buffer
1134  *
1135  * This func. returns the subtype of EAPOL packet.
1136  *
1137  * Return: subtype of the EAPOL packet.
1138  */
1139 enum qdf_proto_subtype
1140 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1141 {
1142 	uint16_t eapol_key_info;
1143 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1144 	uint16_t mask;
1145 
1146 	eapol_key_info = (uint16_t)(*(uint16_t *)
1147 			(data + EAPOL_KEY_INFO_OFFSET));
1148 
1149 	mask = eapol_key_info & EAPOL_MASK;
1150 	switch (mask) {
1151 	case EAPOL_M1_BIT_MASK:
1152 		subtype = QDF_PROTO_EAPOL_M1;
1153 		break;
1154 	case EAPOL_M2_BIT_MASK:
1155 		subtype = QDF_PROTO_EAPOL_M2;
1156 		break;
1157 	case EAPOL_M3_BIT_MASK:
1158 		subtype = QDF_PROTO_EAPOL_M3;
1159 		break;
1160 	case EAPOL_M4_BIT_MASK:
1161 		subtype = QDF_PROTO_EAPOL_M4;
1162 		break;
1163 	default:
1164 		break;
1165 	}
1166 
1167 	return subtype;
1168 }
1169 
1170 /**
1171  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1172  *            of ARP packet.
1173  * @data: Pointer to ARP packet data buffer
1174  *
1175  * This func. returns the subtype of ARP packet.
1176  *
1177  * Return: subtype of the ARP packet.
1178  */
1179 enum qdf_proto_subtype
1180 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1181 {
1182 	uint16_t subtype;
1183 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1184 
1185 	subtype = (uint16_t)(*(uint16_t *)
1186 			(data + ARP_SUB_TYPE_OFFSET));
1187 
1188 	switch (QDF_SWAP_U16(subtype)) {
1189 	case ARP_REQUEST:
1190 		proto_subtype = QDF_PROTO_ARP_REQ;
1191 		break;
1192 	case ARP_RESPONSE:
1193 		proto_subtype = QDF_PROTO_ARP_RES;
1194 		break;
1195 	default:
1196 		break;
1197 	}
1198 
1199 	return proto_subtype;
1200 }
1201 
1202 /**
1203  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1204  *            of IPV4 ICMP packet.
1205  * @data: Pointer to IPV4 ICMP packet data buffer
1206  *
1207  * This func. returns the subtype of ICMP packet.
1208  *
1209  * Return: subtype of the ICMP packet.
1210  */
1211 enum qdf_proto_subtype
1212 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1213 {
1214 	uint8_t subtype;
1215 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1216 
1217 	subtype = (uint8_t)(*(uint8_t *)
1218 			(data + ICMP_SUBTYPE_OFFSET));
1219 
1220 	switch (subtype) {
1221 	case ICMP_REQUEST:
1222 		proto_subtype = QDF_PROTO_ICMP_REQ;
1223 		break;
1224 	case ICMP_RESPONSE:
1225 		proto_subtype = QDF_PROTO_ICMP_RES;
1226 		break;
1227 	default:
1228 		break;
1229 	}
1230 
1231 	return proto_subtype;
1232 }
1233 
1234 /**
1235  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1236  *            of IPV6 ICMPV6 packet.
1237  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1238  *
1239  * This func. returns the subtype of ICMPV6 packet.
1240  *
1241  * Return: subtype of the ICMPV6 packet.
1242  */
1243 enum qdf_proto_subtype
1244 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1245 {
1246 	uint8_t subtype;
1247 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1248 
1249 	subtype = (uint8_t)(*(uint8_t *)
1250 			(data + ICMPV6_SUBTYPE_OFFSET));
1251 
1252 	switch (subtype) {
1253 	case ICMPV6_REQUEST:
1254 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1255 		break;
1256 	case ICMPV6_RESPONSE:
1257 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1258 		break;
1259 	case ICMPV6_RS:
1260 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1261 		break;
1262 	case ICMPV6_RA:
1263 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1264 		break;
1265 	case ICMPV6_NS:
1266 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1267 		break;
1268 	case ICMPV6_NA:
1269 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1270 		break;
1271 	default:
1272 		break;
1273 	}
1274 
1275 	return proto_subtype;
1276 }
1277 
1278 /**
1279  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1280  *            of IPV4 packet.
1281  * @data: Pointer to IPV4 packet data buffer
1282  *
1283  * This func. returns the proto type of IPV4 packet.
1284  *
1285  * Return: proto type of IPV4 packet.
1286  */
1287 uint8_t
1288 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1289 {
1290 	uint8_t proto_type;
1291 
1292 	proto_type = (uint8_t)(*(uint8_t *)(data +
1293 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1294 	return proto_type;
1295 }
1296 
1297 /**
1298  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1299  *            of IPV6 packet.
1300  * @data: Pointer to IPV6 packet data buffer
1301  *
1302  * This func. returns the proto type of IPV6 packet.
1303  *
1304  * Return: proto type of IPV6 packet.
1305  */
1306 uint8_t
1307 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1308 {
1309 	uint8_t proto_type;
1310 
1311 	proto_type = (uint8_t)(*(uint8_t *)(data +
1312 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1313 	return proto_type;
1314 }
1315 
1316 /**
1317  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1318  * @data: Pointer to network data
1319  *
1320  * This api is for Tx packets.
1321  *
1322  * Return: true if packet is ipv4 packet
1323  *	   false otherwise
1324  */
1325 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1326 {
1327 	uint16_t ether_type;
1328 
1329 	ether_type = (uint16_t)(*(uint16_t *)(data +
1330 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1331 
1332 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1333 		return true;
1334 	else
1335 		return false;
1336 }
1337 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1338 
1339 /**
1340  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1341  * @data: Pointer to network data buffer
1342  *
1343  * This api is for ipv4 packet.
1344  *
1345  * Return: true if packet is DHCP packet
1346  *	   false otherwise
1347  */
1348 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1349 {
1350 	uint16_t sport;
1351 	uint16_t dport;
1352 
1353 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1354 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1355 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1356 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1357 					 sizeof(uint16_t)));
1358 
1359 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1360 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1361 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1362 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1363 		return true;
1364 	else
1365 		return false;
1366 }
1367 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
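/*
 * Classification sketch (hypothetical logging helper): the DHCP port check
 * and the option-53 subtype decoder above compose naturally.
 */
#if 0	/* illustrative only */
static void log_dhcp_example(uint8_t *data)
{
	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data) &&
	    __qdf_nbuf_data_get_dhcp_subtype(data) == QDF_PROTO_DHCP_DISCOVER)
		qdf_print("DHCP DISCOVER");
}
#endif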
1368 
1369 /**
1370  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1371  * @data: Pointer to network data buffer
1372  *
1373  * This api is for ipv4 packet.
1374  *
1375  * Return: true if packet is EAPOL packet
1376  *	   false otherwise.
1377  */
1378 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1379 {
1380 	uint16_t ether_type;
1381 
1382 	ether_type = (uint16_t)(*(uint16_t *)(data +
1383 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1384 
1385 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1386 		return true;
1387 	else
1388 		return false;
1389 }
1390 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1391 
1392 /**
1393  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1394  * @skb: Pointer to network buffer
1395  *
1396  * This api is for ipv4 packet.
1397  *
1398  * Return: true if packet is WAPI packet
1399  *	   false otherwise.
1400  */
1401 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1402 {
1403 	uint16_t ether_type;
1404 
1405 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1406 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1407 
1408 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1409 		return true;
1410 	else
1411 		return false;
1412 }
1413 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1414 
1415 /**
1416  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1417  * @skb: Pointer to network buffer
1418  *
1419  * This api is for ipv4 packet.
1420  *
1421  * Return: true if packet is tdls packet
1422  *	   false otherwise.
1423  */
1424 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1425 {
1426 	uint16_t ether_type;
1427 
1428 	ether_type = *(uint16_t *)(skb->data +
1429 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1430 
1431 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1432 		return true;
1433 	else
1434 		return false;
1435 }
1436 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1437 
1438 /**
1439  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1440  * @data: Pointer to network data buffer
1441  *
1442  * This api is for ipv4 packet.
1443  *
1444  * Return: true if packet is ARP packet
1445  *	   false otherwise.
1446  */
1447 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1448 {
1449 	uint16_t ether_type;
1450 
1451 	ether_type = (uint16_t)(*(uint16_t *)(data +
1452 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1453 
1454 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1455 		return true;
1456 	else
1457 		return false;
1458 }
1459 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1460 
1461 /**
1462  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1463  * @data: Pointer to network data buffer
1464  *
1465  * This api is for ipv4 packet.
1466  *
1467  * Return: true if packet is ARP request
1468  *	   false otherwise.
1469  */
1470 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1471 {
1472 	uint16_t op_code;
1473 
1474 	op_code = (uint16_t)(*(uint16_t *)(data +
1475 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1476 
1477 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1478 		return true;
1479 	return false;
1480 }
1481 
1482 /**
1483  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1484  * @data: Pointer to network data buffer
1485  *
1486  * This api is for ipv4 packet.
1487  *
1488  * Return: true if packet is ARP response
1489  *	   false otherwise.
1490  */
1491 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1492 {
1493 	uint16_t op_code;
1494 
1495 	op_code = (uint16_t)(*(uint16_t *)(data +
1496 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1497 
1498 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1499 		return true;
1500 	return false;
1501 }
1502 
1503 /**
1504  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1505  * @data: Pointer to network data buffer
1506  *
1507  * This api is for ipv4 packet.
1508  *
1509  * Return: ARP packet source IP value.
1510  */
1511 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1512 {
1513 	uint32_t src_ip;
1514 
1515 	src_ip = (uint32_t)(*(uint32_t *)(data +
1516 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1517 
1518 	return src_ip;
1519 }
1520 
1521 /**
1522  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1523  * @data: Pointer to network data buffer
1524  *
1525  * This api is for ipv4 packet.
1526  *
1527  * Return: ARP packet target IP value.
1528  */
1529 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1530 {
1531 	uint32_t tgt_ip;
1532 
1533 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1534 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1535 
1536 	return tgt_ip;
1537 }
1538 
1539 /**
1540  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1541  * @data: Pointer to network data buffer
1542  * @len: length to copy
1543  *
1544  * This api is for dns domain name
1545  *
1546  * Return: dns domain name.
1547  */
1548 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1549 {
1550 	uint8_t *domain_name;
1551 
1552 	domain_name = (uint8_t *)
1553 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1554 	return domain_name;
1555 }
1556 
1557 
1558 /**
1559  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1560  * @data: Pointer to network data buffer
1561  *
1562  * This api is for dns query packet.
1563  *
1564  * Return: true if packet is dns query packet.
1565  *	   false otherwise.
1566  */
1567 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1568 {
1569 	uint16_t op_code;
1570 	uint16_t tgt_port;
1571 
1572 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1573 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1574 	/* Standard DNS query always happen on Dest Port 53. */
1575 	/* Standard DNS query always happens on Dest Port 53. */
1576 		op_code = (uint16_t)(*(uint16_t *)(data +
1577 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1578 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1579 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1580 			return true;
1581 	}
1582 	return false;
1583 }
1584 
1585 /**
1586  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1587  * @data: Pointer to network data buffer
1588  *
1589  * This api is for dns query response.
1590  *
1591  * Return: true if packet is dns response packet.
1592  *	   false otherwise.
1593  */
1594 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1595 {
1596 	uint16_t op_code;
1597 	uint16_t src_port;
1598 
1599 	src_port = (uint16_t)(*(uint16_t *)(data +
1600 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1601 	/* Standard DNS response always comes on Src Port 53. */
1602 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1603 		op_code = (uint16_t)(*(uint16_t *)(data +
1604 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1605 
1606 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1607 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1608 			return true;
1609 	}
1610 	return false;
1611 }
1612 
1613 /**
1614  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1615  * @data: Pointer to network data buffer
1616  *
1617  * This api is for tcp syn packet.
1618  *
1619  * Return: true if packet is tcp syn packet.
1620  *	   false otherwise.
1621  */
1622 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1623 {
1624 	uint8_t op_code;
1625 
1626 	op_code = (uint8_t)(*(uint8_t *)(data +
1627 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1628 
1629 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1630 		return true;
1631 	return false;
1632 }
1633 
1634 /**
1635  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1636  * @data: Pointer to network data buffer
1637  *
1638  * This api is for tcp syn ack packet.
1639  *
1640  * Return: true if packet is tcp syn ack packet.
1641  *	   false otherwise.
1642  */
1643 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1644 {
1645 	uint8_t op_code;
1646 
1647 	op_code = (uint8_t)(*(uint8_t *)(data +
1648 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1649 
1650 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1651 		return true;
1652 	return false;
1653 }
1654 
1655 /**
1656  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1657  * @data: Pointer to network data buffer
1658  *
1659  * This api is for tcp ack packet.
1660  *
1661  * Return: true if packet is tcp ack packet.
1662  *	   false otherwise.
1663  */
1664 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1665 {
1666 	uint8_t op_code;
1667 
1668 	op_code = (uint8_t)(*(uint8_t *)(data +
1669 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1670 
1671 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1672 		return true;
1673 	return false;
1674 }
1675 
1676 /**
1677  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1678  * @data: Pointer to network data buffer
1679  *
1680  * This api is for tcp packet.
1681  *
1682  * Return: tcp source port value.
1683  */
1684 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1685 {
1686 	uint16_t src_port;
1687 
1688 	src_port = (uint16_t)(*(uint16_t *)(data +
1689 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1690 
1691 	return src_port;
1692 }
1693 
1694 /**
1695  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1696  * @data: Pointer to network data buffer
1697  *
1698  * This api is for tcp packet.
1699  *
1700  * Return: tcp destination port value.
1701  */
1702 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1703 {
1704 	uint16_t tgt_port;
1705 
1706 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1707 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1708 
1709 	return tgt_port;
1710 }
1711 
1712 /**
1713  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1714  * @data: Pointer to network data buffer
1715  *
1716  * This api is for ipv4 req packet.
1717  *
1718  * Return: true if packet is icmpv4 request
1719  *	   false otherwise.
1720  */
1721 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1722 {
1723 	uint8_t op_code;
1724 
1725 	op_code = (uint8_t)(*(uint8_t *)(data +
1726 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1727 
1728 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1729 		return true;
1730 	return false;
1731 }
1732 
1733 /**
1734  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1735  * @data: Pointer to network data buffer
1736  *
1737  * This api is for ipv4 res packet.
1738  *
1739  * Return: true if packet is icmpv4 response
1740  *	   false otherwise.
1741  */
1742 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1743 {
1744 	uint8_t op_code;
1745 
1746 	op_code = (uint8_t)(*(uint8_t *)(data +
1747 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1748 
1749 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1750 		return true;
1751 	return false;
1752 }
1753 
1754 /**
1755  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1756  * @data: Pointer to network data buffer
1757  *
1758  * This api is for ipv4 packet.
1759  *
1760  * Return: icmpv4 packet source IP value.
1761  */
1762 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1763 {
1764 	uint32_t src_ip;
1765 
1766 	src_ip = (uint32_t)(*(uint32_t *)(data +
1767 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1768 
1769 	return src_ip;
1770 }
1771 
1772 /**
1773  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1774  * @data: Pointer to network data buffer
1775  *
1776  * This api is for ipv4 packet.
1777  *
1778  * Return: icmpv4 packet target IP value.
1779  */
1780 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1781 {
1782 	uint32_t tgt_ip;
1783 
1784 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1785 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1786 
1787 	return tgt_ip;
1788 }
1789 
1790 
1791 /**
1792  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1793  * @data: Pointer to IPV6 packet data buffer
1794  *
1795  * This func. checks whether it is a IPV6 packet or not.
1796  *
1797  * Return: TRUE if it is a IPV6 packet
1798  *         FALSE if not
1799  */
1800 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1801 {
1802 	uint16_t ether_type;
1803 
1804 	ether_type = (uint16_t)(*(uint16_t *)(data +
1805 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1806 
1807 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1808 		return true;
1809 	else
1810 		return false;
1811 }
1812 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1813 
1814 /**
1815  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1816  * @data: Pointer to network data buffer
1817  *
1818  * This api is for ipv6 packet.
1819  *
1820  * Return: true if packet is DHCP packet
1821  *	   false otherwise
1822  */
1823 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1824 {
1825 	uint16_t sport;
1826 	uint16_t dport;
1827 
1828 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1829 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1830 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1831 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1832 					sizeof(uint16_t));
1833 
1834 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1835 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1836 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1837 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1838 		return true;
1839 	else
1840 		return false;
1841 }
1842 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1843 
1844 /**
1845  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1846  * @data: Pointer to IPV4 packet data buffer
1847  *
1848  * This func. checks whether it is a IPV4 multicast packet or not.
1849  *
1850  * Return: TRUE if it is a IPV4 multicast packet
1851  *         FALSE if not
1852  */
1853 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1854 {
1855 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1856 		uint32_t *dst_addr =
1857 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1858 
1859 		/*
1860 		 * Check the top nibble of the IPv4 destination address;
1861 		 * if it is 0xE, the address is multicast.
1862 		 */
1863 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1864 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1865 			return true;
1866 		else
1867 			return false;
1868 	} else
1869 		return false;
1870 }
1871 
1872 /**
1873  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1874  * @data: Pointer to IPV6 packet data buffer
1875  *
1876  * This func. checks whether it is a IPV6 multicast packet or not.
1877  *
1878  * Return: TRUE if it is a IPV6 multicast packet
1879  *         FALSE if not
1880  */
1881 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1882 {
1883 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1884 		uint16_t *dst_addr;
1885 
1886 		dst_addr = (uint16_t *)
1887 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1888 
1889 		/*
1890 		 * Check the first 16 bits of the IPv6 destination
1891 		 * address; if they are 0xFF00, it is an IPv6 mcast packet.
1892 		 */
1893 		if (*dst_addr ==
1894 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1895 			return true;
1896 		else
1897 			return false;
1898 	} else
1899 		return false;
1900 }
1901 
1902 /**
1903  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1904  * @data: Pointer to IPV4 ICMP packet data buffer
1905  *
1906  * This func. checks whether it is a ICMP packet or not.
1907  *
1908  * Return: TRUE if it is a ICMP packet
1909  *         FALSE if not
1910  */
1911 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1912 {
1913 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1914 		uint8_t pkt_type;
1915 
1916 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1917 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1918 
1919 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1920 			return true;
1921 		else
1922 			return false;
1923 	} else
1924 		return false;
1925 }
1926 
1927 /**
1928  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1929  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1930  *
1931  * This func. checks whether it is a ICMPV6 packet or not.
1932  *
1933  * Return: TRUE if it is a ICMPV6 packet
1934  *         FALSE if not
1935  */
1936 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1937 {
1938 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1939 		uint8_t pkt_type;
1940 
1941 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1942 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1943 
1944 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1945 			return true;
1946 		else
1947 			return false;
1948 	} else
1949 		return false;
1950 }
1951 
1952 /**
1953  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1954  * @data: Pointer to IPV4 UDP packet data buffer
1955  *
1956  * This func. checks whether it is a IPV4 UDP packet or not.
1957  *
1958  * Return: TRUE if it is a IPV4 UDP packet
1959  *         FALSE if not
1960  */
1961 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1962 {
1963 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1964 		uint8_t pkt_type;
1965 
1966 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1967 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1968 
1969 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1970 			return true;
1971 		else
1972 			return false;
1973 	} else
1974 		return false;
1975 }
1976 
1977 /**
1978  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1979  * @data: Pointer to IPV4 TCP packet data buffer
1980  *
1981  * This func. checks whether it is a IPV4 TCP packet or not.
1982  *
1983  * Return: TRUE if it is a IPV4 TCP packet
1984  *         FALSE if not
1985  */
1986 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1987 {
1988 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1989 		uint8_t pkt_type;
1990 
1991 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1992 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1993 
1994 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1995 			return true;
1996 		else
1997 			return false;
1998 	} else
1999 		return false;
2000 }
2001 
2002 /**
2003  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2004  * @data: Pointer to IPV6 UDP packet data buffer
2005  *
2006  * This function checks whether it is an IPV6 UDP packet or not.
2007  *
2008  * Return: TRUE if it is an IPV6 UDP packet
2009  *         FALSE if not
2010  */
2011 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2012 {
2013 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2014 		uint8_t pkt_type;
2015 
2016 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2017 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2018 
2019 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2020 			return true;
2021 		else
2022 			return false;
2023 	} else
2024 		return false;
2025 }
2026 
2027 /**
2028  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2029  * @data: Pointer to IPV6 TCP packet data buffer
2030  *
2031  * This function checks whether it is an IPV6 TCP packet or not.
2032  *
2033  * Return: TRUE if it is an IPV6 TCP packet
2034  *         FALSE if not
2035  */
2036 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2037 {
2038 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2039 		uint8_t pkt_type;
2040 
2041 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2042 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2043 
2044 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2045 			return true;
2046 		else
2047 			return false;
2048 	} else
2049 		return false;
2050 }
2051 
2052 /**
2053  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2054  * @nbuf: sk buff
2055  *
2056  * Return: true if packet is broadcast
2057  *	   false otherwise
2058  */
2059 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2060 {
2061 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2062 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2063 }
2064 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
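
/*
 * Illustrative use of the classifier helpers above (a sketch; the caller
 * and the queue ids are hypothetical, not part of this file): all of the
 * __qdf_nbuf_data_is_*() helpers only inspect the buffer contents, so the
 * caller must ensure the L2/L3/L4 headers are present at the data pointer.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv4_udp_pkt(data) ||
 *	    __qdf_nbuf_data_is_ipv6_udp_pkt(data))
 *		queue_id = QUEUE_UDP;
 *	else if (__qdf_nbuf_data_is_icmp_pkt(data) ||
 *		 __qdf_nbuf_data_is_icmpv6_pkt(data))
 *		queue_id = QUEUE_PING;
 */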
2065 
2066 #ifdef NBUF_MEMORY_DEBUG
2067 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2068 
2069 /**
2070  * struct qdf_nbuf_track_t - Network buffer track structure
2071  *
2072  * @p_next: Pointer to next
2073  * @net_buf: Pointer to network buffer
2074  * @file_name: File name
2075  * @line_num: Line number
2076  * @size: Size
2077  */
2078 struct qdf_nbuf_track_t {
2079 	struct qdf_nbuf_track_t *p_next;
2080 	qdf_nbuf_t net_buf;
2081 	uint8_t *file_name;
2082 	uint32_t line_num;
2083 	size_t size;
2084 };
2085 
2086 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2087 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2088 
2089 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2090 static struct kmem_cache *nbuf_tracking_cache;
2091 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2092 static spinlock_t qdf_net_buf_track_free_list_lock;
2093 static uint32_t qdf_net_buf_track_free_list_count;
2094 static uint32_t qdf_net_buf_track_used_list_count;
2095 static uint32_t qdf_net_buf_track_max_used;
2096 static uint32_t qdf_net_buf_track_max_free;
2097 static uint32_t qdf_net_buf_track_max_allocated;
2098 
2099 /**
2100  * update_max_used() - update the max-used and max-allocated trackers
2101  *
2102  * Tracks the max number of network buffers that the wlan driver was tracking
2103  * at any one time, and the max number allocated at once (used + free).
2104  *
2105  * Return: none
2106  */
2107 static inline void update_max_used(void)
2108 {
2109 	int sum;
2110 
2111 	if (qdf_net_buf_track_max_used <
2112 	    qdf_net_buf_track_used_list_count)
2113 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2114 	sum = qdf_net_buf_track_free_list_count +
2115 		qdf_net_buf_track_used_list_count;
2116 	if (qdf_net_buf_track_max_allocated < sum)
2117 		qdf_net_buf_track_max_allocated = sum;
2118 }
2119 
2120 /**
2121  * update_max_free() - update qdf_net_buf_track_max_free
2122  *
2123  * Tracks the max number of tracking buffers kept in the freelist.
2124  *
2125  * Return: none
2126  */
2127 static inline void update_max_free(void)
2128 {
2129 	if (qdf_net_buf_track_max_free <
2130 	    qdf_net_buf_track_free_list_count)
2131 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2132 }
2133 
2134 /**
2135  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2136  *
2137  * This function pulls from the freelist if possible, otherwise it falls
2138  * back to kmem_cache_alloc(). It also adds flexibility to adjust the
2139  * allocation and freelist schemes.
2140  *
2141  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2142  */
2143 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2144 {
2145 	int flags = GFP_KERNEL;
2146 	unsigned long irq_flag;
2147 	QDF_NBUF_TRACK *new_node = NULL;
2148 
2149 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2150 	qdf_net_buf_track_used_list_count++;
2151 	if (qdf_net_buf_track_free_list != NULL) {
2152 		new_node = qdf_net_buf_track_free_list;
2153 		qdf_net_buf_track_free_list =
2154 			qdf_net_buf_track_free_list->p_next;
2155 		qdf_net_buf_track_free_list_count--;
2156 	}
2157 	update_max_used();
2158 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2159 
2160 	if (new_node != NULL)
2161 		return new_node;
2162 
2163 	if (in_interrupt() || irqs_disabled() || in_atomic())
2164 		flags = GFP_ATOMIC;
2165 
2166 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2167 }
2168 
2169 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2170 #define FREEQ_POOLSIZE 2048
2171 
2172 /**
2173  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2174  *
2175  * Matches calls to qdf_nbuf_track_alloc.
2176  * Either frees the tracking cookie to kernel or an internal
2177  * freelist based on the size of the freelist.
2178  *
2179  * Return: none
2180  */
2181 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2182 {
2183 	unsigned long irq_flag;
2184 
2185 	if (!node)
2186 		return;
2187 
2188 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE, but
2189 	 * only shrink the freelist if it is bigger than twice the number of
2190 	 * nbufs in use. If the driver is stalling in a consistently bursty
2191 	 * fashion, this will keep 3/4 of the allocations on the free list
2192 	 * while also allowing the system to recover memory as less frantic
2193 	 * traffic occurs.
2194 	 */
2195 
2196 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2197 
2198 	qdf_net_buf_track_used_list_count--;
2199 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2200 	   (qdf_net_buf_track_free_list_count >
2201 	    qdf_net_buf_track_used_list_count << 1)) {
2202 		kmem_cache_free(nbuf_tracking_cache, node);
2203 	} else {
2204 		node->p_next = qdf_net_buf_track_free_list;
2205 		qdf_net_buf_track_free_list = node;
2206 		qdf_net_buf_track_free_list_count++;
2207 	}
2208 	update_max_free();
2209 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2210 }
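
/*
 * Illustrative pairing of the two helpers above (a sketch; the real callers
 * are qdf_net_buf_debug_add_node() and qdf_net_buf_debug_delete_node()
 * below): every cookie obtained from qdf_nbuf_track_alloc() must eventually
 * go back through qdf_nbuf_track_free(), which decides between the internal
 * freelist and kmem_cache_free() based on the current freelist size.
 *
 *	QDF_NBUF_TRACK *node = qdf_nbuf_track_alloc();
 *
 *	if (node) {
 *		node->net_buf = net_buf;	// fill in tracking fields
 *		...
 *		qdf_nbuf_track_free(node);	// when tracking ends
 *	}
 */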
2211 
2212 /**
2213  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2214  *
2215  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2216  * the freelist first makes it performant for the first iperf udp burst
2217  * as well as steady state.
2218  *
2219  * Return: None
2220  */
2221 static void qdf_nbuf_track_prefill(void)
2222 {
2223 	int i;
2224 	QDF_NBUF_TRACK *node, *head;
2225 
2226 	/* prepopulate the freelist */
2227 	head = NULL;
2228 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2229 		node = qdf_nbuf_track_alloc();
2230 		if (node == NULL)
2231 			continue;
2232 		node->p_next = head;
2233 		head = node;
2234 	}
2235 	while (head) {
2236 		node = head->p_next;
2237 		qdf_nbuf_track_free(head);
2238 		head = node;
2239 	}
2240 
2241 	/* prefilled buffers should not count as used */
2242 	qdf_net_buf_track_max_used = 0;
2243 }
2244 
2245 /**
2246  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2247  *
2248  * This initializes the memory manager for the nbuf tracking cookies.  Because
2249  * these cookies are all the same size and only used in this feature, we can
2250  * use a kmem_cache to provide tracking as well as to speed up allocations.
2251  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2252  * features) a freelist is prepopulated here.
2253  *
2254  * Return: None
2255  */
2256 static void qdf_nbuf_track_memory_manager_create(void)
2257 {
2258 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2259 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2260 						sizeof(QDF_NBUF_TRACK),
2261 						0, 0, NULL);
2262 
2263 	qdf_nbuf_track_prefill();
2264 }
2265 
2266 /**
2267  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2268  *
2269  * Empty the freelist and print out usage statistics when it is no longer
2270  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2271  * any nbuf tracking cookies were leaked.
2272  *
2273  * Return: None
2274  */
2275 static void qdf_nbuf_track_memory_manager_destroy(void)
2276 {
2277 	QDF_NBUF_TRACK *node, *tmp;
2278 	unsigned long irq_flag;
2279 
2280 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2281 	node = qdf_net_buf_track_free_list;
2282 
2283 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2284 		qdf_print("%s: unexpectedly large max_used count %d",
2285 			  __func__, qdf_net_buf_track_max_used);
2286 
2287 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2288 		qdf_print("%s: %d unused trackers were allocated",
2289 			  __func__,
2290 			  qdf_net_buf_track_max_allocated -
2291 			  qdf_net_buf_track_max_used);
2292 
2293 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2294 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2295 		qdf_print("%s: check freelist shrinking functionality",
2296 			  __func__);
2297 
2298 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2299 		  "%s: %d residual freelist size\n",
2300 		  __func__, qdf_net_buf_track_free_list_count);
2301 
2302 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2303 		  "%s: %d max freelist size observed\n",
2304 		  __func__, qdf_net_buf_track_max_free);
2305 
2306 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2307 		  "%s: %d max buffers used observed\n",
2308 		  __func__, qdf_net_buf_track_max_used);
2309 
2310 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2311 		  "%s: %d max buffers allocated observed\n",
2312 		  __func__, qdf_net_buf_track_max_allocated);
2313 
2314 	while (node) {
2315 		tmp = node;
2316 		node = node->p_next;
2317 		kmem_cache_free(nbuf_tracking_cache, tmp);
2318 		qdf_net_buf_track_free_list_count--;
2319 	}
2320 
2321 	if (qdf_net_buf_track_free_list_count != 0)
2322 		qdf_print("%s: %d unfreed tracking memory lost in freelist\n",
2323 			  __func__, qdf_net_buf_track_free_list_count);
2324 
2325 	if (qdf_net_buf_track_used_list_count != 0)
2326 		qdf_print("%s: %d unfreed tracking memory still in use\n",
2327 			  __func__, qdf_net_buf_track_used_list_count);
2328 
2329 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2330 	kmem_cache_destroy(nbuf_tracking_cache);
2331 	qdf_net_buf_track_free_list = NULL;
2332 }
2333 
2334 /**
2335  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2336  *
2337  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2338  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2339  * WLAN driver modules whose allocated SKBs are freed by the network stack
2340  * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
2341  * not reported as a memory leak.
2342  *
2343  * Return: none
2344  */
2345 void qdf_net_buf_debug_init(void)
2346 {
2347 	uint32_t i;
2348 
2349 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2350 
2351 	qdf_nbuf_map_tracking_init();
2352 	qdf_nbuf_track_memory_manager_create();
2353 
2354 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2355 		gp_qdf_net_buf_track_tbl[i] = NULL;
2356 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2357 	}
2358 }
2359 qdf_export_symbol(qdf_net_buf_debug_init);
2360 
2361 /**
2362  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2363  *
2364  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2365  * As part of exiting the functionality, free the leaked memory and
2366  * clean up the tracking buffers.
2367  *
2368  * Return: none
2369  */
2370 void qdf_net_buf_debug_exit(void)
2371 {
2372 	uint32_t i;
2373 	uint32_t count = 0;
2374 	unsigned long irq_flag;
2375 	QDF_NBUF_TRACK *p_node;
2376 	QDF_NBUF_TRACK *p_prev;
2377 
2378 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2379 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2380 		p_node = gp_qdf_net_buf_track_tbl[i];
2381 		while (p_node) {
2382 			p_prev = p_node;
2383 			p_node = p_node->p_next;
2384 			count++;
2385 			qdf_print("SKB buf memory Leak@ File %s, @Line %d, size %zu, nbuf %pK\n",
2386 				  p_prev->file_name, p_prev->line_num,
2387 				  p_prev->size, p_prev->net_buf);
2388 			qdf_nbuf_track_free(p_prev);
2389 		}
2390 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2391 	}
2392 
2393 	qdf_nbuf_track_memory_manager_destroy();
2394 	qdf_nbuf_map_tracking_deinit();
2395 
2396 #ifdef CONFIG_HALT_KMEMLEAK
2397 	if (count) {
2398 		qdf_print("%d SKBs leaked .. please fix the SKB leak", count);
2399 		QDF_BUG(0);
2400 	}
2401 #endif
2402 }
2403 qdf_export_symbol(qdf_net_buf_debug_exit);
2404 
2405 /**
2406  * qdf_net_buf_debug_hash() - hash network buffer pointer
2407  *
2408  * Return: hash value
2409  */
2410 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2411 {
2412 	uint32_t i;
2413 
2414 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2415 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2416 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2417 
2418 	return i;
2419 }
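/*
 * Worked example of the hash above (illustrative pointer value): for
 * net_buf = 0x12345680, (p >> 4) = 0x1234568 and (p >> 14) = 0x48d1;
 * their sum is 0x1238e39, and masking with
 * (QDF_NET_BUF_TRACK_MAX_SIZE - 1) = 0x3ff yields bucket 0x239. Mixing
 * two shifted copies of the pointer spreads the (typically 16-byte
 * aligned) skb addresses across the hash buckets.
 */
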
2420 
2421 /**
2422  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2423  *
2424  * Return: If the skb is found in the hash table, pointer to its tracking
2425  *	node; else %NULL
2426  */
2427 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2428 {
2429 	uint32_t i;
2430 	QDF_NBUF_TRACK *p_node;
2431 
2432 	i = qdf_net_buf_debug_hash(net_buf);
2433 	p_node = gp_qdf_net_buf_track_tbl[i];
2434 
2435 	while (p_node) {
2436 		if (p_node->net_buf == net_buf)
2437 			return p_node;
2438 		p_node = p_node->p_next;
2439 	}
2440 
2441 	return NULL;
2442 }
2443 
2444 /**
2445  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2446  *
2447  * Return: none
2448  */
2449 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2450 				uint8_t *file_name, uint32_t line_num)
2451 {
2452 	uint32_t i;
2453 	unsigned long irq_flag;
2454 	QDF_NBUF_TRACK *p_node;
2455 	QDF_NBUF_TRACK *new_node;
2456 
2457 	new_node = qdf_nbuf_track_alloc();
2458 
2459 	i = qdf_net_buf_debug_hash(net_buf);
2460 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2461 
2462 	p_node = qdf_net_buf_debug_look_up(net_buf);
2463 
2464 	if (p_node) {
2465 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2466 			  p_node->net_buf, p_node->file_name, p_node->line_num,
2467 			  net_buf, file_name, line_num);
2468 		qdf_nbuf_track_free(new_node);
2469 	} else {
2470 		p_node = new_node;
2471 		if (p_node) {
2472 			p_node->net_buf = net_buf;
2473 			p_node->file_name = file_name;
2474 			p_node->line_num = line_num;
2475 			p_node->size = size;
2476 			qdf_mem_skb_inc(size);
2477 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2478 			gp_qdf_net_buf_track_tbl[i] = p_node;
2479 		} else
2480 			qdf_print(
2481 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2482 				  file_name, line_num, size);
2483 	}
2484 
2485 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2486 }
2487 qdf_export_symbol(qdf_net_buf_debug_add_node);
2488 
2489 /**
2490  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2491  *
2492  * Return: none
2493  */
2494 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2495 {
2496 	uint32_t i;
2497 	QDF_NBUF_TRACK *p_head;
2498 	QDF_NBUF_TRACK *p_node = NULL;
2499 	unsigned long irq_flag;
2500 	QDF_NBUF_TRACK *p_prev;
2501 
2502 	i = qdf_net_buf_debug_hash(net_buf);
2503 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2504 
2505 	p_head = gp_qdf_net_buf_track_tbl[i];
2506 
2507 	/* Unallocated SKB */
2508 	if (!p_head)
2509 		goto done;
2510 
2511 	p_node = p_head;
2512 	/* Found at head of the table */
2513 	if (p_head->net_buf == net_buf) {
2514 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2515 		goto done;
2516 	}
2517 
2518 	/* Search in collision list */
2519 	while (p_node) {
2520 		p_prev = p_node;
2521 		p_node = p_node->p_next;
2522 		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
2523 			p_prev->p_next = p_node->p_next;
2524 			break;
2525 		}
2526 	}
2527 
2528 done:
2529 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2530 
2531 	if (p_node) {
2532 		qdf_mem_skb_dec(p_node->size);
2533 		qdf_nbuf_track_free(p_node);
2534 	} else {
2535 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2536 			  net_buf);
2537 		QDF_BUG(0);
2538 	}
2539 }
2540 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2541 
2542 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2543 			uint8_t *file_name, uint32_t line_num)
2544 {
2545 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2546 
2547 	while (ext_list) {
2548 		/*
2549 		 * Take care to also add each buffer of a jumbo packet
2550 		 * chained via frag_list
2551 		 */
2552 		qdf_nbuf_t next;
2553 
2554 		next = qdf_nbuf_queue_next(ext_list);
2555 		qdf_net_buf_debug_add_node(ext_list, 0, file_name, line_num);
2556 		ext_list = next;
2557 	}
2558 	qdf_net_buf_debug_add_node(net_buf, 0, file_name, line_num);
2559 }
2560 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2561 
2562 /**
2563  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2564  * @net_buf: Network buf holding head segment (single)
2565  *
2566  * WLAN driver modules whose allocated SKBs are freed by the network stack
2567  * are supposed to call this API before returning the SKB to the network
2568  * stack, so that the SKB is not reported as a memory leak.
2569  *
2570  * Return: none
2571  */
2572 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2573 {
2574 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2575 
2576 	while (ext_list) {
2577 		/*
2578 		 * Take care to also free each buffer of a jumbo packet
2579 		 * chained via frag_list
2580 		 */
2581 		qdf_nbuf_t next;
2582 
2583 		next = qdf_nbuf_queue_next(ext_list);
2584 
2585 		if (qdf_nbuf_is_tso(ext_list) &&
2586 			qdf_nbuf_get_users(ext_list) > 1) {
2587 			ext_list = next;
2588 			continue;
2589 		}
2590 
2591 		qdf_net_buf_debug_delete_node(ext_list);
2592 		ext_list = next;
2593 	}
2594 
2595 	if (qdf_nbuf_is_tso(net_buf) && qdf_nbuf_get_users(net_buf) > 1)
2596 		return;
2597 
2598 	qdf_net_buf_debug_delete_node(net_buf);
2599 }
2600 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2601 
2602 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2603 				int reserve, int align, int prio,
2604 				uint8_t *file, uint32_t line)
2605 {
2606 	qdf_nbuf_t nbuf;
2607 
2608 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio);
2609 
2610 	/* Store SKB in internal QDF tracking table */
2611 	if (qdf_likely(nbuf)) {
2612 		qdf_net_buf_debug_add_node(nbuf, size, file, line);
2613 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_ALLOC);
2614 	}
2615 
2616 	return nbuf;
2617 }
2618 qdf_export_symbol(qdf_nbuf_alloc_debug);
2619 
2620 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, uint8_t *file, uint32_t line)
2621 {
2622 	if (qdf_nbuf_is_tso(nbuf) && qdf_nbuf_get_users(nbuf) > 1)
2623 		goto free_buf;
2624 
2625 	/* Remove SKB from internal QDF tracking table */
2626 	if (qdf_likely(nbuf)) {
2627 		qdf_net_buf_debug_delete_node(nbuf);
2628 		qdf_nbuf_history_add(nbuf, file, line, QDF_NBUF_FREE);
2629 	}
2630 
2631 free_buf:
2632 	__qdf_nbuf_free(nbuf);
2633 }
2634 qdf_export_symbol(qdf_nbuf_free_debug);
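
/*
 * Illustrative lifecycle under NBUF_MEMORY_DEBUG (a sketch; in practice
 * these entry points are normally reached through the qdf_nbuf_alloc()/
 * qdf_nbuf_free() wrappers, which supply the file and line automatically):
 *
 *	qdf_nbuf_t buf;
 *
 *	buf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
 *				   (uint8_t *)__FILE__, __LINE__);
 *	if (buf) {
 *		// ... use buf ...
 *		qdf_nbuf_free_debug(buf, (uint8_t *)__FILE__, __LINE__);
 *	}
 *
 * Any buffer that goes through the alloc path and is never freed is
 * reported by qdf_net_buf_debug_exit() with the recorded file and line.
 */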
2635 
2636 #endif /* NBUF_MEMORY_DEBUG */
2637 
2638 #if defined(FEATURE_TSO)
2639 
2640 /**
2641  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2642  *
2643  * @ethproto: ethernet type of the msdu
2644  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2645  * @l2_len: L2 length for the msdu
2646  * @eit_hdr: pointer to EIT header
2647  * @eit_hdr_len: EIT header length for the msdu
2648  * @eit_hdr_dma_map_addr: dma addr for EIT header
2649  * @tcphdr: pointer to tcp header
2650  * @ipv4_csum_en: ipv4 checksum enable
2651  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2652  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2653  * @ip_id: IP id
2654  * @tcp_seq_num: TCP sequence number
2655  *
2656  * This structure holds the TSO common info that is common
2657  * across all the TCP segments of the jumbo packet.
2658  */
2659 struct qdf_tso_cmn_seg_info_t {
2660 	uint16_t ethproto;
2661 	uint16_t ip_tcp_hdr_len;
2662 	uint16_t l2_len;
2663 	uint8_t *eit_hdr;
2664 	uint32_t eit_hdr_len;
2665 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2666 	struct tcphdr *tcphdr;
2667 	uint16_t ipv4_csum_en;
2668 	uint16_t tcp_ipv4_csum_en;
2669 	uint16_t tcp_ipv6_csum_en;
2670 	uint16_t ip_id;
2671 	uint32_t tcp_seq_num;
2672 };
2673 
2674 /**
2675  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2676  * information
2677  * @osdev: qdf device handle
2678  * @skb: skb buffer
2679  * @tso_info: Parameters common to all segments
2680  *
2681  * Get the TSO information that is common across all the TCP
2682  * segments of the jumbo packet
2683  *
2684  * Return: 0 - success 1 - failure
2685  */
2686 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2687 			struct sk_buff *skb,
2688 			struct qdf_tso_cmn_seg_info_t *tso_info)
2689 {
2690 	/* Get ethernet type and ethernet header length */
2691 	tso_info->ethproto = vlan_get_protocol(skb);
2692 
2693 	/* Determine whether this is an IPv4 or IPv6 packet */
2694 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2695 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2696 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2697 
2698 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2699 		tso_info->ipv4_csum_en = 1;
2700 		tso_info->tcp_ipv4_csum_en = 1;
2701 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2702 			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
2703 				 ipv4_hdr->protocol);
2704 			return 1;
2705 		}
2706 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2707 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2708 		tso_info->tcp_ipv6_csum_en = 1;
2709 	} else {
2710 		qdf_print("TSO: ethertype 0x%x is not supported!\n",
2711 			 tso_info->ethproto);
2712 		return 1;
2713 	}
2714 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2715 	tso_info->tcphdr = tcp_hdr(skb);
2716 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2717 	/* get pointer to the ethernet + IP + TCP header and their length */
2718 	tso_info->eit_hdr = skb->data;
2719 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2720 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2721 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2722 							tso_info->eit_hdr,
2723 							tso_info->eit_hdr_len,
2724 							DMA_TO_DEVICE);
2725 	if (unlikely(dma_mapping_error(osdev->dev,
2726 				       tso_info->eit_hdr_dma_map_addr))) {
2727 		qdf_print("DMA mapping error!\n");
2728 		qdf_assert(0);
2729 		return 1;
2730 	}
2731 
2732 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2733 		/* include IPv4 header length for IPV4 (total length) */
2734 		tso_info->ip_tcp_hdr_len =
2735 			tso_info->eit_hdr_len - tso_info->l2_len;
2736 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2737 		/* exclude IPv6 header length for IPv6 (payload length) */
2738 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2739 	}
2740 	/*
2741 	 * The length of the payload (application layer data) is added to
2742 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2743 	 * descriptor.
2744 	 */
2745 
2746 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2747 		tso_info->tcp_seq_num,
2748 		tso_info->eit_hdr_len,
2749 		tso_info->l2_len,
2750 		skb->len);
2751 	return 0;
2752 }
2753 
2754 
2755 /**
2756  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
2757  * @dmaaddr: DMA address
2758  * @lo: pointer that receives the low 32 bits
2759  * @hi: pointer that receives the high 32 bits
2760  *
2761  * Returns the high and low 32 bits of the DMA addr in the provided pointers.
2762  *
2763  * Return: N/A
2761  */
2762 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
2763 				      uint32_t *lo, uint32_t *hi)
2764 {
2765 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
2766 		*lo = lower_32_bits(dmaaddr);
2767 		*hi = upper_32_bits(dmaaddr);
2768 	} else {
2769 		*lo = dmaaddr;
2770 		*hi = 0;
2771 	}
2772 }
2773 qdf_export_symbol(__qdf_dmaaddr_to_32s);
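
/*
 * Illustrative use (the descriptor layout is hypothetical): hardware rings
 * frequently take a DMA address as two 32-bit words, which is exactly what
 * this helper produces regardless of the width of qdf_dma_addr_t.
 *
 *	uint32_t lo, hi;
 *
 *	__qdf_dmaaddr_to_32s(paddr, &lo, &hi);
 *	desc->buf_addr_lo = lo;		// hypothetical descriptor fields
 *	desc->buf_addr_hi = hi;
 */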
2774 
2775 /**
2776  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2777  *
2778  * @curr_seg: Segment whose contents are initialized
2779  * @tso_cmn_info: Parameters common to all segments
2780  *
2781  * Return: None
2782  */
2783 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2784 				struct qdf_tso_seg_elem_t *curr_seg,
2785 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2786 {
2787 	/* Initialize the whole segment, including the flags, to 0 */
2788 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2789 
2790 	/*
2791 	 * The following fields remain the same across all segments of
2792 	 * a jumbo packet
2793 	 */
2794 	curr_seg->seg.tso_flags.tso_enable = 1;
2795 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2796 		tso_cmn_info->ipv4_csum_en;
2797 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2798 		tso_cmn_info->tcp_ipv6_csum_en;
2799 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2800 		tso_cmn_info->tcp_ipv4_csum_en;
2801 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2802 
2803 	/* The following fields change for the segments */
2804 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2805 	tso_cmn_info->ip_id++;
2806 
2807 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2808 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2809 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2810 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2811 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2812 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2813 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2814 
2815 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2816 
2817 	/*
2818 	 * First fragment for each segment always contains the ethernet,
2819 	 * IP and TCP header
2820 	 */
2821 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2822 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2823 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2824 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2825 
2826 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2827 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2828 		   tso_cmn_info->eit_hdr_len,
2829 		   curr_seg->seg.tso_flags.tcp_seq_num,
2830 		   curr_seg->seg.total_len);
2831 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2832 }
2833 
2834 /**
2835  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2836  * into segments
2837  * @osdev: qdf device handle
2838  * @skb: network buffer to be segmented
2839  * @tso_info: output; information about the TSO segments is populated here
2840  *
2841  * This function fragments a TCP jumbo packet into smaller
2842  * segments to be transmitted by the driver. It chains the TSO
2843  * segments created into a list.
2844  *
2845  * Return: number of TSO segments
2846  */
2847 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2848 		struct qdf_tso_info_t *tso_info)
2849 {
2850 	/* common across all segments */
2851 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2852 	/* segment specific */
2853 	void *tso_frag_vaddr;
2854 	qdf_dma_addr_t tso_frag_paddr = 0;
2855 	uint32_t num_seg = 0;
2856 	struct qdf_tso_seg_elem_t *curr_seg;
2857 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2858 	struct skb_frag_struct *frag = NULL;
2859 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2860 	uint32_t skb_frag_len = 0; /* skb's fragment length (continous memory)*/
2861 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2862 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2863 	int j = 0; /* skb fragment index */
2864 
2865 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2866 
2867 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2868 						skb, &tso_cmn_info))) {
2869 		qdf_print("TSO: error getting common segment info\n");
2870 		return 0;
2871 	}
2872 
2873 	total_num_seg = tso_info->tso_num_seg_list;
2874 	curr_seg = tso_info->tso_seg_list;
2875 
2876 	/* length of the first chunk of data in the skb */
2877 	skb_frag_len = skb_headlen(skb);
2878 
2879 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2880 	/* update the remaining skb fragment length and TSO segment length */
2881 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2882 	skb_proc -= tso_cmn_info.eit_hdr_len;
2883 
2884 	/* get the address to the next tso fragment */
2885 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2886 	/* get the length of the next tso fragment */
2887 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2888 
2889 	if (tso_frag_len != 0) {
2890 		tso_frag_paddr = dma_map_single(osdev->dev,
2891 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2892 	}
2893 
2894 	if (unlikely(dma_mapping_error(osdev->dev,
2895 					tso_frag_paddr))) {
2896 		qdf_print("%s:%d DMA mapping error!\n", __func__, __LINE__);
2897 		qdf_assert(0);
2898 		return 0;
2899 	}
2900 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2901 		__LINE__, skb_frag_len, tso_frag_len);
2902 	num_seg = tso_info->num_segs;
2903 	tso_info->num_segs = 0;
2904 	tso_info->is_tso = 1;
2905 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2906 
2907 	while (num_seg && curr_seg) {
2908 		int i = 1; /* tso fragment index */
2909 		uint8_t more_tso_frags = 1;
2910 
2911 		curr_seg->seg.num_frags = 0;
2912 		tso_info->num_segs++;
2913 		total_num_seg->num_seg.tso_cmn_num_seg++;
2914 
2915 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
2916 						 &tso_cmn_info);
2917 
2918 		if (unlikely(skb_proc == 0))
2919 			return tso_info->num_segs;
2920 
2921 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
2922 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
2923 		/* frag len is added to ip_len in while loop below*/
2924 
2925 		curr_seg->seg.num_frags++;
2926 
2927 		while (more_tso_frags) {
2928 			if (tso_frag_len != 0) {
2929 				curr_seg->seg.tso_frags[i].vaddr =
2930 					tso_frag_vaddr;
2931 				curr_seg->seg.tso_frags[i].length =
2932 					tso_frag_len;
2933 				curr_seg->seg.total_len += tso_frag_len;
2934 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
2935 				curr_seg->seg.num_frags++;
2936 				skb_proc = skb_proc - tso_frag_len;
2937 
2938 				/* increment the TCP sequence number */
2939 
2940 				tso_cmn_info.tcp_seq_num += tso_frag_len;
2941 				curr_seg->seg.tso_frags[i].paddr =
2942 					tso_frag_paddr;
2943 			}
2944 
2945 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
2946 					__func__, __LINE__,
2947 					i,
2948 					tso_frag_len,
2949 					curr_seg->seg.total_len,
2950 					curr_seg->seg.tso_frags[i].vaddr);
2951 
2952 			/* if there is no more data left in the skb */
2953 			if (!skb_proc)
2954 				return tso_info->num_segs;
2955 
2956 			/* get the next payload fragment information */
2957 			/* check if there are more fragments in this segment */
2958 			if (tso_frag_len < tso_seg_size) {
2959 				more_tso_frags = 1;
2960 				if (tso_frag_len != 0) {
2961 					tso_seg_size = tso_seg_size -
2962 						tso_frag_len;
2963 					i++;
2964 					if (curr_seg->seg.num_frags ==
2965 								FRAG_NUM_MAX) {
2966 						more_tso_frags = 0;
2967 						/*
2968 						 * reset i and the tso
2969 						 * payload size
2970 						 */
2971 						i = 1;
2972 						tso_seg_size =
2973 							skb_shinfo(skb)->
2974 								gso_size;
2975 					}
2976 				}
2977 			} else {
2978 				more_tso_frags = 0;
2979 				/* reset i and the tso payload size */
2980 				i = 1;
2981 				tso_seg_size = skb_shinfo(skb)->gso_size;
2982 			}
2983 
2984 			/* if the next fragment is contiguous */
2985 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
2986 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
2987 				skb_frag_len = skb_frag_len - tso_frag_len;
2988 				tso_frag_len = min(skb_frag_len, tso_seg_size);
2989 
2990 			} else { /* the next fragment is not contiguous */
2991 				if (skb_shinfo(skb)->nr_frags == 0) {
2992 					qdf_print("TSO: nr_frags == 0!\n");
2993 					qdf_assert(0);
2994 					return 0;
2995 				}
2996 				if (j >= skb_shinfo(skb)->nr_frags) {
2997 					qdf_print("TSO: nr_frags %d j %d\n",
2998 						  skb_shinfo(skb)->nr_frags, j);
2999 					qdf_assert(0);
3000 					return 0;
3001 				}
3002 				frag = &skb_shinfo(skb)->frags[j];
3003 				skb_frag_len = skb_frag_size(frag);
3004 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3005 				tso_frag_vaddr = skb_frag_address_safe(frag);
3006 				j++;
3007 			}
3008 
3009 			TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d tso_seg_size %d\n",
3010 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3011 				tso_seg_size);
3012 
3013 			if (!(tso_frag_vaddr)) {
3014 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3015 						__func__);
3016 				return 0;
3017 			}
3018 
3019 			tso_frag_paddr =
3020 					 dma_map_single(osdev->dev,
3021 						 tso_frag_vaddr,
3022 						 tso_frag_len,
3023 						 DMA_TO_DEVICE);
3024 			if (unlikely(dma_mapping_error(osdev->dev,
3025 							tso_frag_paddr))) {
3026 				qdf_print("%s:%d DMA mapping error!\n",
3027 						__func__, __LINE__);
3028 				qdf_assert(0);
3029 				return 0;
3030 			}
3031 		}
3032 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3033 				curr_seg->seg.tso_flags.tcp_seq_num);
3034 		num_seg--;
3035 		/* if TCP FIN flag was set, set it in the last segment */
3036 		if (!num_seg)
3037 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3038 
3039 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3040 		curr_seg = curr_seg->next;
3041 	}
3042 	return tso_info->num_segs;
3043 }
3044 qdf_export_symbol(__qdf_nbuf_get_tso_info);
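
/*
 * Illustrative TSO TX flow (a sketch; allocation of the segment lists is
 * driver-specific and elided here): the caller typically sizes its segment
 * list with __qdf_nbuf_get_tso_num_seg(), attaches tso_seg_list and
 * tso_num_seg_list to tso_info, and then lets __qdf_nbuf_get_tso_info()
 * fill in and DMA-map each segment.
 *
 *	struct qdf_tso_info_t tso_info;
 *
 *	tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	// ... allocate tso_info.tso_seg_list / tso_info.tso_num_seg_list ...
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *		return QDF_STATUS_E_FAILURE;	// nothing was mapped
 *	// hand the segments to the target; on TX completion unmap each
 *	// one with __qdf_nbuf_unmap_tso_segment()
 */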
3045 
3046 /**
3047  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3048  *
3049  * @osdev: qdf device handle
3050  * @tso_seg: TSO segment element to be unmapped
3051  * @is_last_seg: whether this is last tso seg or not
3052  *
3053  * Return: none
3054  */
3055 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3056 			  struct qdf_tso_seg_elem_t *tso_seg,
3057 			  bool is_last_seg)
3058 {
3059 	uint32_t num_frags = 0;
3060 
3061 	if (tso_seg->seg.num_frags > 0)
3062 		num_frags = tso_seg->seg.num_frags - 1;
3063 
3064 	/* Num of frags in a tso seg cannot be less than 2 */
3065 	if (num_frags < 1) {
3066 		qdf_assert(0);
3067 		qdf_print("ERROR: num of frags in a tso segment is %d\n",
3068 				  (num_frags + 1));
3069 		return;
3070 	}
3071 
3072 	while (num_frags) {
3073 		/* Do dma unmap for all frags of the tso seg except the 0th */
3074 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3075 			qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n",
3076 				  num_frags);
3077 			qdf_assert(0);
3078 			return;
3079 		}
3080 		dma_unmap_single(osdev->dev,
3081 				 tso_seg->seg.tso_frags[num_frags].paddr,
3082 				 tso_seg->seg.tso_frags[num_frags].length,
3083 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3084 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3085 		num_frags--;
3086 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3087 	}
3088 
3089 	if (is_last_seg) {
3090 		/* Do dma unmap for the tso seg's 0th frag */
3091 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3092 			qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n");
3093 			qdf_assert(0);
3094 			return;
3095 		}
3096 		dma_unmap_single(osdev->dev,
3097 				 tso_seg->seg.tso_frags[0].paddr,
3098 				 tso_seg->seg.tso_frags[0].length,
3099 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3100 		tso_seg->seg.tso_frags[0].paddr = 0;
3101 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3102 	}
3103 }
3104 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3105 
3106 /**
3107  * __qdf_nbuf_get_tso_num_seg() - calculate the number of TSO segments
3108  * for an nbuf
3109  * @skb: network buffer to be segmented
3110  *
3111  * This function calculates how many TCP segments a jumbo packet will be
3112  * fragmented into, accounting for the EIT header and the per-segment
3113  * fragment limit, without actually creating the segments.
3114  *
3115  * Return: number of TSO segments (0 on failure)
3118  */
3119 #ifndef BUILD_X86
3120 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3121 {
3122 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3123 	uint32_t remainder, num_segs = 0;
3124 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3125 	uint8_t frags_per_tso = 0;
3126 	uint32_t skb_frag_len = 0;
3127 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3128 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3129 	struct skb_frag_struct *frag = NULL;
3130 	int j = 0;
3131 	uint32_t temp_num_seg = 0;
3132 
3133 	/* length of the first chunk of data in the skb minus eit header*/
3134 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3135 
3136 	/* Calculate num of segs for skb's first chunk of data*/
3137 	remainder = skb_frag_len % tso_seg_size;
3138 	num_segs = skb_frag_len / tso_seg_size;
3139 	/*
3140 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3141 	 * In that case, one more tso seg is required to accommodate
3142 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3143 	 * then remaining data will be accommodated while doing the
3144 	 * calculation for nr_frags data. Hence, frags_per_tso++.
3145 	 */
3146 	if (remainder) {
3147 		if (!skb_nr_frags)
3148 			num_segs++;
3149 		else
3150 			frags_per_tso++;
3151 	}
3152 
3153 	while (skb_nr_frags) {
3154 		if (j >= skb_shinfo(skb)->nr_frags) {
3155 			qdf_print("TSO: nr_frags %d j %d\n",
3156 			skb_shinfo(skb)->nr_frags, j);
3157 			qdf_assert(0);
3158 			return 0;
3159 		}
3160 		/*
3161 		 * Calculate the number of tso segs for the nr_frags data:
3162 		 * get the length of each frag in skb_frag_len, add it to the
3163 		 * remainder, get the number of segments by dividing by
3164 		 * tso_seg_size, and calculate the new remainder.
3165 		 * Decrement the nr_frags value and keep
3166 		 * looping over all the skb fragments.
3167 		 */
3168 		frag = &skb_shinfo(skb)->frags[j];
3169 		skb_frag_len = skb_frag_size(frag);
3170 		temp_num_seg = num_segs;
3171 		remainder += skb_frag_len;
3172 		num_segs += remainder / tso_seg_size;
3173 		remainder = remainder % tso_seg_size;
3174 		skb_nr_frags--;
3175 		if (remainder) {
3176 			if (num_segs > temp_num_seg)
3177 				frags_per_tso = 0;
3178 			/*
3179 			 * Increment frags_per_tso whenever the remainder is
3180 			 * positive. If frags_per_tso reaches (max - 1)
3181 			 * [the first frag always holds the EIT header, hence
3182 			 * max - 1], increment num_segs as no more data can be
3183 			 * accommodated in the current tso seg. Reset the
3184 			 * remainder and frags_per_tso and keep looping.
3185 			 */
3186 			frags_per_tso++;
3187 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3188 				num_segs++;
3189 				frags_per_tso = 0;
3190 				remainder = 0;
3191 			}
3192 			/*
3193 			 * If this is the last skb frag and the remainder is
3194 			 * still non-zero (frags_per_tso has not reached
3195 			 * max - 1), then increment num_segs to take care of
3196 			 * the remaining length.
3197 			 */
3198 			if (!skb_nr_frags && remainder) {
3199 				num_segs++;
3200 				frags_per_tso = 0;
3201 			}
3202 		} else {
3203 			/* Whenever remainder is 0, reset frags_per_tso. */
3204 			frags_per_tso = 0;
3205 		}
3206 		j++;
3207 	}
3208 
3209 	return num_segs;
3210 }
3211 #else
3212 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3213 {
3214 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3215 	struct skb_frag_struct *frag = NULL;
3216 
3217 	/*
3218 	 * Check if the head SKB or any of the frags are allocated below
3219 	 * 0x50000040, a region which cannot be accessed by the Target
3220 	 */
3221 	if (virt_to_phys(skb->data) < 0x50000040) {
3222 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3223 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3224 				virt_to_phys(skb->data));
3225 		goto fail;
3226 
3227 	}
3228 
3229 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3230 		frag = &skb_shinfo(skb)->frags[i];
3231 
3232 		if (!frag)
3233 			goto fail;
3234 
3235 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3236 			goto fail;
3237 	}
3238 
3239 
3240 	gso_size = skb_shinfo(skb)->gso_size;
3241 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3242 			+ tcp_hdrlen(skb));
3243 	while (tmp_len) {
3244 		num_segs++;
3245 		if (tmp_len > gso_size)
3246 			tmp_len -= gso_size;
3247 		else
3248 			break;
3249 	}
3250 
3251 	return num_segs;
3252 
3253 	/*
3254 	 * Do not free this frame, just do socket level accounting
3255 	 * so that this is not reused.
3256 	 */
3257 fail:
3258 	if (skb->sk)
3259 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3260 
3261 	return 0;
3262 }
3263 #endif
3264 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
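
/*
 * Worked example for the non-BUILD_X86 path (illustrative numbers): with
 * gso_size = 1460, a 54-byte EIT header, skb_headlen(skb) = 1514 and one
 * 2920-byte page fragment, the linear data contributes
 * (1514 - 54) / 1460 = 1 segment with remainder 0, and the fragment adds
 * 2920 / 1460 = 2 more, so the function returns num_segs = 3.
 */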
3265 
3266 #endif /* FEATURE_TSO */
3267 
3268 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3269 {
3270 	qdf_nbuf_users_inc(&skb->users);
3271 	return skb;
3272 }
3273 qdf_export_symbol(__qdf_nbuf_inc_users);
3274 
3275 int __qdf_nbuf_get_users(struct sk_buff *skb)
3276 {
3277 	return qdf_nbuf_users_read(&skb->users);
3278 }
3279 qdf_export_symbol(__qdf_nbuf_get_users);
3280 
3281 /**
3282  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3283  * @skb: sk_buff handle
3284  *
3285  * Return: none
3286  */
3287 
3288 void __qdf_nbuf_ref(struct sk_buff *skb)
3289 {
3290 	skb_get(skb);
3291 }
3292 qdf_export_symbol(__qdf_nbuf_ref);
3293 
3294 /**
3295  * __qdf_nbuf_shared() - Check whether the buffer is shared
3296  *  @skb: sk_buff buffer
3297  *
3298  *  Return: true if more than one person has a reference to this buffer.
3299  */
3300 int __qdf_nbuf_shared(struct sk_buff *skb)
3301 {
3302 	return skb_shared(skb);
3303 }
3304 qdf_export_symbol(__qdf_nbuf_shared);
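
/*
 * Illustrative reference handling (a sketch): a caller that queues the same
 * nbuf on two paths takes an extra reference first, so the buffer survives
 * until both paths have freed it.
 *
 *	__qdf_nbuf_ref(skb);			// users: 1 -> 2
 *	if (__qdf_nbuf_shared(skb))
 *		pr_debug("users=%d\n", __qdf_nbuf_get_users(skb));
 */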
3305 
3306 /**
3307  * __qdf_nbuf_dmamap_create() - create a DMA map.
3308  * @osdev: qdf device handle
3309  * @dmap: dma map handle
3310  *
3311  * This can later be used to map networking buffers. They:
3312  * - need space in adf_drv's software descriptor
3313  * - are typically created during adf_drv_create
3314  * - need to be created before any API (qdf_nbuf_map) that uses them
3315  *
3316  * Return: QDF STATUS
3317  */
3318 QDF_STATUS
3319 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3320 {
3321 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3322 	/*
3323 	 * The driver can advertise its SG capability; it must be handled,
3324 	 * including bounce buffers if they are there.
3325 	 */
3326 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3327 	if (!(*dmap))
3328 		error = QDF_STATUS_E_NOMEM;
3329 
3330 	return error;
3331 }
3332 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3333 /**
3334  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3335  * @osdev: qdf device handle
3336  * @dmap: dma map handle
3337  *
3338  * Return: none
3339  */
3340 void
3341 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3342 {
3343 	kfree(dmap);
3344 }
3345 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3346 
3347 /**
3348  * __qdf_nbuf_map_nbytes_single() - map nbytes
3349  * @osdev: os device
3350  * @buf: buffer
3351  * @dir: direction
3352  * @nbytes: number of bytes
3353  *
3354  * Return: QDF_STATUS
3355  */
3356 #ifdef A_SIMOS_DEVHOST
3357 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3358 		qdf_device_t osdev, struct sk_buff *buf,
3359 		 qdf_dma_dir_t dir, int nbytes)
3360 {
3361 	qdf_dma_addr_t paddr;
3362 
3363 	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
3364 	return QDF_STATUS_SUCCESS;
3365 }
3366 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3367 #else
3368 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3369 		qdf_device_t osdev, struct sk_buff *buf,
3370 		 qdf_dma_dir_t dir, int nbytes)
3371 {
3372 	qdf_dma_addr_t paddr;
3373 
3374 	/* assume that the OS only provides a single fragment */
3375 	QDF_NBUF_CB_PADDR(buf) = paddr =
3376 		dma_map_single(osdev->dev, buf->data,
3377 			nbytes, __qdf_dma_dir_to_os(dir));
3378 	return dma_mapping_error(osdev->dev, paddr) ?
3379 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3380 }
3381 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3382 #endif
3383 /**
3384  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3385  * @osdev: os device
3386  * @buf: buffer
3387  * @dir: direction
3388  * @nbytes: number of bytes
3389  *
3390  * Return: none
3391  */
3392 #if defined(A_SIMOS_DEVHOST)
3393 void
3394 __qdf_nbuf_unmap_nbytes_single(
3395 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3396 {
3397 }
3398 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3399 
3400 #else
3401 void
3402 __qdf_nbuf_unmap_nbytes_single(
3403 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3404 {
3405 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3406 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3407 		return;
3408 	}
3409 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3410 			nbytes, __qdf_dma_dir_to_os(dir));
3411 }
3412 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3413 #endif
3414 /**
3415  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3416  * @osdev: os device
3417  * @skb: skb handle
3418  * @dir: dma direction
3419  * @nbytes: number of bytes to be mapped
3420  *
3421  * Return: QDF_STATUS
3422  */
3423 #ifdef QDF_OS_DEBUG
3424 QDF_STATUS
3425 __qdf_nbuf_map_nbytes(
3426 	qdf_device_t osdev,
3427 	struct sk_buff *skb,
3428 	qdf_dma_dir_t dir,
3429 	int nbytes)
3430 {
3431 	struct skb_shared_info  *sh = skb_shinfo(skb);
3432 
3433 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3434 
3435 	/*
3436 	 * Assume there's only a single fragment.
3437 	 * To support multiple fragments, it would be necessary to change
3438 	 * adf_nbuf_t to be a separate object that stores meta-info
3439 	 * (including the bus address for each fragment) and a pointer
3440 	 * to the underlying sk_buff.
3441 	 */
3442 	qdf_assert(sh->nr_frags == 0);
3443 
3444 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3445 }
3446 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3447 #else
3448 QDF_STATUS
3449 __qdf_nbuf_map_nbytes(
3450 	qdf_device_t osdev,
3451 	struct sk_buff *skb,
3452 	qdf_dma_dir_t dir,
3453 	int nbytes)
3454 {
3455 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3456 }
3457 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3458 #endif
3459 /**
3460  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3461  * @osdev: OS device
3462  * @skb: skb handle
3463  * @dir: direction
3464  * @nbytes: number of bytes
3465  *
3466  * Return: none
3467  */
3468 void
3469 __qdf_nbuf_unmap_nbytes(
3470 	qdf_device_t osdev,
3471 	struct sk_buff *skb,
3472 	qdf_dma_dir_t dir,
3473 	int nbytes)
3474 {
3475 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3476 
3477 	/*
3478 	 * Assume there's a single fragment.
3479 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3480 	 */
3481 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3482 }
3483 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
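
/*
 * Illustrative map/unmap pairing (a sketch): a single-fragment TX buffer is
 * mapped for the device before being posted and unmapped with the same
 * direction and length once the TX completion arrives.
 *
 *	if (QDF_STATUS_SUCCESS !=
 *	    __qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len))
 *		return QDF_STATUS_E_FAULT;
 *	// ... post QDF_NBUF_CB_PADDR(skb) to hardware, wait for completion
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */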
3484 
3485 /**
3486  * __qdf_nbuf_dma_map_info() - return the dma map info
3487  * @bmap: dma map
3488  * @sg: dma map info
3489  *
3490  * Return: none
3491  */
3492 void
3493 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3494 {
3495 	qdf_assert(bmap->mapped);
3496 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3497 
3498 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3499 			sizeof(struct __qdf_segment));
3500 	sg->nsegs = bmap->nsegs;
3501 }
3502 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3503 /**
3504  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3505  *			specified by the index
3506  * @skb: sk buff
3507  * @sg: scatter/gather list of all the frags
3508  *
3509  * Return: none
3510  */
3511 #if defined(__QDF_SUPPORT_FRAG_MEM)
3512 void
3513 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3514 {
3515 	qdf_assert(skb != NULL);
3516 	sg->sg_segs[0].vaddr = skb->data;
3517 	sg->sg_segs[0].len   = skb->len;
3518 	sg->nsegs            = 1;
3519 
3520 	for (int i = 1; i <= sh->nr_frags; i++) {
3521 		skb_frag_t    *f        = &sh->frags[i - 1];
3522 
3523 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3524 			f->page_offset);
3525 		sg->sg_segs[i].len      = f->size;
3526 
3527 		qdf_assert(i < QDF_MAX_SGLIST);
3528 	}
3529 	sg->nsegs += i;
3530 
3531 }
3532 qdf_export_symbol(__qdf_nbuf_frag_info);
3533 #else
3534 #ifdef QDF_OS_DEBUG
3535 void
3536 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3537 {
3538 
3539 	struct skb_shared_info  *sh = skb_shinfo(skb);
3540 
3541 	qdf_assert(skb != NULL);
3542 	sg->sg_segs[0].vaddr = skb->data;
3543 	sg->sg_segs[0].len   = skb->len;
3544 	sg->nsegs            = 1;
3545 
3546 	qdf_assert(sh->nr_frags == 0);
3547 }
3548 qdf_export_symbol(__qdf_nbuf_frag_info);
3549 #else
3550 void
3551 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3552 {
3553 	sg->sg_segs[0].vaddr = skb->data;
3554 	sg->sg_segs[0].len   = skb->len;
3555 	sg->nsegs            = 1;
3556 }
3557 qdf_export_symbol(__qdf_nbuf_frag_info);
3558 #endif
3559 #endif
3560 /**
3561  * __qdf_nbuf_get_frag_size() - get frag size
3562  * @nbuf: sk buffer
3563  * @cur_frag: current frag
3564  *
3565  * Return: frag size
3566  */
3567 uint32_t
3568 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3569 {
3570 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3571 	const skb_frag_t *frag = sh->frags + cur_frag;
3572 
3573 	return skb_frag_size(frag);
3574 }
3575 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3576 
3577 /**
3578  * __qdf_nbuf_frag_map() - dma map frag
3579  * @osdev: os device
3580  * @nbuf: sk buff
3581  * @offset: offset
3582  * @dir: direction
3583  * @cur_frag: current fragment
3584  *
3585  * Return: QDF status
3586  */
3587 #ifdef A_SIMOS_DEVHOST
3588 QDF_STATUS __qdf_nbuf_frag_map(
3589 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3590 	int offset, qdf_dma_dir_t dir, int cur_frag)
3591 {
3592 	qdf_dma_addr_t paddr;
3593 
3594 	QDF_NBUF_CB_PADDR(nbuf) = paddr = (qdf_dma_addr_t)nbuf->data;
3595 	return QDF_STATUS_SUCCESS;
3596 }
3597 qdf_export_symbol(__qdf_nbuf_frag_map);
3598 #else
3599 QDF_STATUS __qdf_nbuf_frag_map(
3600 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3601 	int offset, qdf_dma_dir_t dir, int cur_frag)
3602 {
3603 	dma_addr_t paddr, frag_len;
3604 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3605 	const skb_frag_t *frag = sh->frags + cur_frag;
3606 
3607 	frag_len = skb_frag_size(frag);
3608 
3609 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3610 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3611 					__qdf_dma_dir_to_os(dir));
3612 	return dma_mapping_error(osdev->dev, paddr) ?
3613 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3614 }
3615 qdf_export_symbol(__qdf_nbuf_frag_map);
3616 #endif
3617 /**
3618  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3619  * @dmap: dma map
3620  * @cb: callback
3621  * @arg: argument
3622  *
3623  * Return: none
3624  */
3625 void
3626 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3627 {
3628 	return;
3629 }
3630 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3631 
3632 
3633 /**
3634  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3635  * @osdev: os device
3636  * @buf: sk buff
3637  * @dir: direction
3638  *
3639  * Return: none
3640  */
3641 #if defined(A_SIMOS_DEVHOST)
3642 static void __qdf_nbuf_sync_single_for_cpu(
3643 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3644 {
3645 	return;
3646 }
3647 #else
3648 static void __qdf_nbuf_sync_single_for_cpu(
3649 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3650 {
3651 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3652 		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
3653 		return;
3654 	}
3655 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3656 		skb_end_offset(buf) - skb_headroom(buf),
3657 		__qdf_dma_dir_to_os(dir));
3658 }
3659 #endif
3660 /**
3661  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3662  * @osdev: os device
3663  * @skb: sk buff
3664  * @dir: direction
3665  *
3666  * Return: none
3667  */
3668 void
3669 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3670 	struct sk_buff *skb, qdf_dma_dir_t dir)
3671 {
3672 	qdf_assert(
3673 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3674 
3675 	/*
3676 	 * Assume there's a single fragment.
3677 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3678 	 */
3679 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3680 }
3681 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
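
/*
 * Illustrative use (a sketch): after the device has DMA'd an RX frame into
 * a mapped buffer, sync it back to the CPU domain before parsing headers.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	// the CPU may now safely read skb->data
 */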
3682 
3683 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3684 /**
3685  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3686  * @rx_status: Pointer to rx_status.
3687  * @rtap_buf: Buf to which VHT info has to be updated.
3688  * @rtap_len: Current length of radiotap buffer
3689  *
3690  * Return: Length of radiotap after VHT flags updated.
3691  */
3692 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3693 					struct mon_rx_status *rx_status,
3694 					int8_t *rtap_buf,
3695 					uint32_t rtap_len)
3696 {
3697 	uint16_t vht_flags = 0;
3698 
3699 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3700 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3701 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3702 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3703 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3704 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3705 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3706 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3707 	rtap_len += 2;
3708 
3709 	rtap_buf[rtap_len] |=
3710 		(rx_status->is_stbc ?
3711 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3712 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3713 		(rx_status->ldpc ?
3714 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3715 		(rx_status->beamformed ?
3716 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3717 	rtap_len += 1;
3718 	switch (rx_status->vht_flag_values2) {
3719 	case IEEE80211_RADIOTAP_VHT_BW_20:
3720 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3721 		break;
3722 	case IEEE80211_RADIOTAP_VHT_BW_40:
3723 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3724 		break;
3725 	case IEEE80211_RADIOTAP_VHT_BW_80:
3726 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3727 		break;
3728 	case IEEE80211_RADIOTAP_VHT_BW_160:
3729 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3730 		break;
3731 	}
3732 	rtap_len += 1;
3733 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3734 	rtap_len += 1;
3735 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3736 	rtap_len += 1;
3737 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3738 	rtap_len += 1;
3739 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3740 	rtap_len += 1;
3741 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3742 	rtap_len += 1;
3743 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3744 	rtap_len += 1;
3745 	put_unaligned_le16(rx_status->vht_flag_values6,
3746 			   &rtap_buf[rtap_len]);
3747 	rtap_len += 2;
3748 
3749 	return rtap_len;
3750 }
3751 
3752 /**
3753  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3754  * @rx_status: Pointer to rx_status.
3755  * @rtap_buf: buffer to which radiotap has to be updated
3756  * @rtap_len: radiotap length
3757  *
3758  * This API updates high-efficiency (11ax) fields in the radiotap header.
3759  *
3760  * Return: updated radiotap buffer length
3761  */
3762 static unsigned int
3763 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3764 				     int8_t *rtap_buf, uint32_t rtap_len)
3765 {
3766 	/*
3767 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3768 	 * Enable all "known" HE radiotap flags for now
3769 	 */
3770 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3771 	rtap_len += 2;
3772 
3773 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3774 	rtap_len += 2;
3775 
3776 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3777 	rtap_len += 2;
3778 
3779 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3780 	rtap_len += 2;
3781 
3782 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3783 	rtap_len += 2;
3784 
3785 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3786 	rtap_len += 2;
3787 
3788 	return rtap_len;
3789 }
3790 
3791 
3792 /**
3793  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3794  * @rx_status: Pointer to rx_status.
3795  * @rtap_buf: Buffer to which the radiotap fields are to be added
3796  * @rtap_len: Current radiotap length
3797  *
3798  * This API updates HE-MU fields in the radiotap header.
3799  *
3800  * Return: Length of the radiotap buffer after the HE-MU fields are updated.
3801  */
3802 static unsigned int
3803 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3804 				     int8_t *rtap_buf, uint32_t rtap_len)
3805 {
3806 	/*
3807 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3808 	 * Enable all "known" he-mu radiotap flags for now
3809 	 */
3810 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3811 	rtap_len += 2;
3812 
3813 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3814 	rtap_len += 2;
3815 
3816 	rtap_buf[rtap_len] = rx_status->he_RU[0];
3817 	rtap_len += 1;
3818 
3819 	rtap_buf[rtap_len] = rx_status->he_RU[1];
3820 	rtap_len += 1;
3821 
3822 	rtap_buf[rtap_len] = rx_status->he_RU[2];
3823 	rtap_len += 1;
3824 
3825 	rtap_buf[rtap_len] = rx_status->he_RU[3];
3826 	rtap_len += 1;
3827 
3828 	return rtap_len;
3829 }
3830 
3831 /**
3832  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
3833  * @rx_status: Pointer to rx_status.
3834  * @rtap_buf: Buffer to which the radiotap fields are to be added
3835  * @rtap_len: Current radiotap length
3836  *
3837  * This API updates HE-MU-other fields in the radiotap header.
3838  *
3839  * Return: Length of the radiotap buffer after the fields are updated.
3840  */
3841 static unsigned int
3842 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
3843 				     int8_t *rtap_buf, uint32_t rtap_len)
3844 {
3845 	/*
3846 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
3847 	 * Enable all "known" he-mu-other radiotap flags for now
3848 	 */
3849 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
3850 	rtap_len += 2;
3851 
3852 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
3853 	rtap_len += 2;
3854 
3855 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
3856 	rtap_len += 1;
3857 
3858 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
3859 	rtap_len += 1;
3860 
3861 	return rtap_len;
3862 }
3863 
3864 #define NORMALIZED_TO_NOISE_FLOOR (-96)
3865 
3866 /* This is the combined length for radiotap: the mandatory part
3867  * (struct ieee80211_radiotap_header) plus RADIOTAP_HEADER_LEN
3868  * must not exceed the available headroom_sz.
3869  * Increase this when more radiotap elements are added.
3870  */
3871 
3872 #define RADIOTAP_VHT_FLAGS_LEN 12
3873 #define RADIOTAP_HE_FLAGS_LEN 12
3874 #define RADIOTAP_HE_MU_FLAGS_LEN 8
3875 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN 18
3876 #define RADIOTAP_FIXED_HEADER_LEN 16
3877 #define RADIOTAP_HT_FLAGS_LEN 3
3878 #define RADIOTAP_AMPDU_STATUS_LEN 8
3879 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
3880 				RADIOTAP_FIXED_HEADER_LEN + \
3881 				RADIOTAP_HT_FLAGS_LEN + \
3882 				RADIOTAP_VHT_FLAGS_LEN + \
3883 				RADIOTAP_AMPDU_STATUS_LEN + \
3884 				RADIOTAP_HE_FLAGS_LEN + \
3885 				RADIOTAP_HE_MU_FLAGS_LEN + \
3886 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN)
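
/*
 * Worked total (assuming the usual 8-byte struct ieee80211_radiotap_header:
 * u8 version, u8 pad, le16 len, le32 present):
 * 8 + 16 + 3 + 12 + 8 + 12 + 8 + 18 = 85 bytes of headroom reserved by
 * RADIOTAP_HEADER_LEN.
 */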
3887 
3888 #define IEEE80211_RADIOTAP_HE 23
3889 #define IEEE80211_RADIOTAP_HE_MU	24
3890 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
3891 
3892 /**
3893  * radiotap_num_to_freq() - Get frequency from chan number
3894  * @chan_num: Input channel number
3895  *
3896  * Return: Channel frequency in MHz
3897  */
3898 static uint16_t radiotap_num_to_freq(uint16_t chan_num)
3899 {
3900 	if (chan_num == CHANNEL_NUM_14)
3901 		return CHANNEL_FREQ_2484;
3902 	if (chan_num < CHANNEL_NUM_14)
3903 		return CHANNEL_FREQ_2407 +
3904 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3905 
3906 	if (chan_num < CHANNEL_NUM_27)
3907 		return CHANNEL_FREQ_2512 +
3908 			((chan_num - CHANNEL_NUM_15) *
3909 			 FREQ_MULTIPLIER_CONST_20MHZ);
3910 
3911 	if (chan_num > CHANNEL_NUM_182 &&
3912 			chan_num < CHANNEL_NUM_197)
3913 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
3914 			CHANNEL_FREQ_4000);
3915 
3916 	return CHANNEL_FREQ_5000 +
3917 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
3918 }
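
/*
 * Worked examples (illustrative): channel 1 -> 2407 + 1 * 5 = 2412 MHz;
 * channel 14 -> 2484 MHz (special case); channel 36 -> 5000 + 36 * 5 =
 * 5180 MHz; channels 183..196 use the 4000 MHz base, e.g. 184 -> 4920 MHz.
 */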
3919 
3920 /**
3921  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
3922  * @rx_status: Pointer to rx_status.
3923  * @rtap_buf: Buffer to which the AMPDU info is to be added
3924  * @rtap_len: Current length of radiotap buffer
3925  *
3926  * Return: Length of the radiotap buffer after the AMPDU flags are updated.
3927  */
3928 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
3929 					struct mon_rx_status *rx_status,
3930 					uint8_t *rtap_buf,
3931 					uint32_t rtap_len)
3932 {
3933 	/*
3934 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
3935 	 * First 32 bits of AMPDU represents the reference number
3936 	 */
3937 
3938 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
3939 	uint16_t ampdu_flags = 0;
3940 	uint16_t ampdu_reserved_flags = 0;
3941 
3942 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
3943 	rtap_len += 4;
3944 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
3945 	rtap_len += 2;
3946 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
3947 	rtap_len += 2;
3948 
3949 	return rtap_len;
3950 }
3951 
3952 /**
3953  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
3954  * @rx_status: Pointer to rx_status.
3955  * @nbuf:      nbuf pointer to which radiotap has to be updated
3956  * @headroom_sz: Available headroom size.
3957  *
3958  * Return: Length of the radiotap header added; 0 on failure.
3959  */
3960 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
3961 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
3962 {
3963 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
3964 	struct ieee80211_radiotap_header *rthdr =
3965 		(struct ieee80211_radiotap_header *)rtap_buf;
3966 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
3967 	uint32_t rtap_len = rtap_hdr_len;
3968 	uint8_t length = rtap_len;
3969 
3970 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
3971 	rthdr->it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
3972 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
3973 	rtap_len += 8;
3974 
3975 	/* IEEE80211_RADIOTAP_FLAGS u8 */
3976 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS);
3977 
3978 	if (rx_status->rs_fcs_err)
3979 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
3980 
3981 	rtap_buf[rtap_len] = rx_status->rtap_flags;
3982 	rtap_len += 1;
3983 
3984 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
3985 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
3986 	    !rx_status->he_flags) {
3987 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
3988 		rtap_buf[rtap_len] = rx_status->rate;
3989 	} else
3990 		rtap_buf[rtap_len] = 0;
3991 	rtap_len += 1;
3992 
3993 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
3994 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
3995 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
3996 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
3997 	rtap_len += 2;
3998 	/* Channel flags. */
3999 	if (rx_status->chan_num > CHANNEL_NUM_35)
4000 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4001 	else
4002 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4003 	if (rx_status->cck_flag)
4004 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4005 	if (rx_status->ofdm_flag)
4006 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4007 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4008 	rtap_len += 2;
4009 
4010 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4011 	 *					(dBm)
4012 	 */
4013 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4014 	/*
4015 	 * rssi_comb is in dB; convert it to dBm by
4016 	 * normalizing to the noise floor of -96 dBm.
4017 	 */
4018 	rtap_buf[rtap_len] = rx_status->rssi_comb +
4019 		NORMALIZED_TO_NOISE_FLOOR;
4020 	rtap_len += 1;
4021 
4022 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4023 	rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA);
4024 	rtap_buf[rtap_len] = rx_status->nr_ant;
4025 	rtap_len += 1;
4026 
4027 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4028 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4029 		return 0;
4030 	}
4031 
4032 	if (rx_status->ht_flags) {
4033 		length = rtap_len;
4034 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4035 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
4036 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4037 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4038 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4039 		rtap_len += 1;
4040 
4041 		if (rx_status->sgi)
4042 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4043 		if (rx_status->bw)
4044 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4045 		else
4046 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4047 		rtap_len += 1;
4048 
4049 		rtap_buf[rtap_len] = rx_status->mcs;
4050 		rtap_len += 1;
4051 
4052 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4053 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4054 			return 0;
4055 		}
4056 	}
4057 
4058 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4059 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4060 		rthdr->it_present |=
4061 			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4062 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4063 								rtap_buf,
4064 								rtap_len);
4065 	}
4066 
4067 	if (rx_status->vht_flags) {
4068 		length = rtap_len;
4069 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4070 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
4071 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4072 								rtap_buf,
4073 								rtap_len);
4074 
4075 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4076 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4077 			return 0;
4078 		}
4079 	}
4080 
4081 	if (rx_status->he_flags) {
4082 		length = rtap_len;
4083 		/* IEEE80211_RADIOTAP_HE */
4084 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
4085 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4086 								rtap_buf,
4087 								rtap_len);
4088 
4089 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4090 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4091 			return 0;
4092 		}
4093 	}
4094 
4095 	if (rx_status->he_mu_flags) {
4096 		length = rtap_len;
4097 		/* IEEE80211_RADIOTAP_HE-MU */
4098 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
4099 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4100 								rtap_buf,
4101 								rtap_len);
4102 
4103 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4104 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4105 			return 0;
4106 		}
4107 	}
4108 
4109 	if (rx_status->he_mu_other_flags) {
4110 		length = rtap_len;
4111 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4112 		rthdr->it_present |=
4113 			cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4114 		rtap_len =
4115 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4116 								rtap_buf,
4117 								rtap_len);
4118 
4119 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4120 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4121 			return 0;
4122 		}
4123 	}
4124 
4125 	rthdr->it_len = cpu_to_le16(rtap_len);
4126 
4127 	if (headroom_sz < rtap_len) {
4128 		qdf_print("ERROR: not enough space to update radiotap");
4129 		return 0;
4130 	}
4131 	qdf_nbuf_push_head(nbuf, rtap_len);
4132 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4133 	return rtap_len;
4134 }
4135 #else
4136 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4137 					struct mon_rx_status *rx_status,
4138 					int8_t *rtap_buf,
4139 					uint32_t rtap_len)
4140 {
4141 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4142 	return 0;
4143 }
4144 
4145 static unsigned int qdf_nbuf_update_radiotap_he_flags(
4146 	struct mon_rx_status *rx_status, int8_t *rtap_buf, uint32_t rtap_len)
4147 {
4148 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4149 	return 0;
4150 }
4151 
4152 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4153 					struct mon_rx_status *rx_status,
4154 					uint8_t *rtap_buf,
4155 					uint32_t rtap_len)
4156 {
4157 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4158 	return 0;
4159 }
4160 
4161 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4162 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4163 {
4164 	qdf_print("ERROR: struct ieee80211_radiotap_header not supported");
4165 	return 0;
4166 }
4167 #endif
4168 qdf_export_symbol(qdf_nbuf_update_radiotap);
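
/*
 * Example (illustrative sketch): a monitor-mode RX handler could prepend
 * the radiotap header before delivering a frame. Filling rx_status is
 * device-specific; "nbuf" is a placeholder, and qdf_nbuf_headroom() is
 * assumed here to report the available headroom.
 *
 *	struct mon_rx_status rx_status = {0};
 *	fill rx_status from the target's PPDU status ...
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf)))
 *		drop the frame: not enough headroom for the header
 */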
4169 
4170 /**
4171  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4172  * @cb_func_ptr: function pointer to the nbuf free callback
4173  *
4174  * This function registers a callback function for nbuf free.
4175  *
4176  * Return: none
4177  */
4178 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4179 {
4180 	nbuf_free_cb = cb_func_ptr;
4181 }
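
/*
 * Example (illustrative): registering a free callback, e.g. for leak
 * accounting. my_nbuf_free_tracker is a hypothetical function matching
 * the qdf_nbuf_free_t signature; it runs whenever an nbuf is freed.
 *
 *	static void my_nbuf_free_tracker(qdf_nbuf_t nbuf)
 *	{
 *		update per-module buffer accounting here
 *	}
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_tracker);
 */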
4182 
4183 /**
4184  * qdf_nbuf_classify_pkt() - classify packet
4185  * @skb: sk buff
4186  *
4187  * Return: none
4188  */
4189 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4190 {
4191 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4192 
4193 	/* check whether destination mac address is broadcast/multicast */
4194 	if (is_broadcast_ether_addr(eh->h_dest))
4195 		QDF_NBUF_CB_SET_BCAST(skb);
4196 	else if (is_multicast_ether_addr(eh->h_dest))
4197 		QDF_NBUF_CB_SET_MCAST(skb);
4198 
4199 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4200 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4201 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4202 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4203 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4204 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4205 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4206 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4207 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4208 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4209 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4210 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4211 }
4212 qdf_export_symbol(qdf_nbuf_classify_pkt);
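
/*
 * Example (illustrative sketch): a TX path can classify a frame once and
 * then branch on the recorded type; the queueing step is a placeholder.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		enqueue on the high-priority queue
 */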
4213 
4214 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4215 {
4216 	qdf_nbuf_users_set(&nbuf->users, 1);
4217 	nbuf->data = nbuf->head + NET_SKB_PAD;
4218 	skb_reset_tail_pointer(nbuf);
4219 }
4220 qdf_export_symbol(__qdf_nbuf_init);
4221 
4222 #ifdef WLAN_FEATURE_FASTPATH
4223 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4224 {
4225 	qdf_nbuf_users_set(&nbuf->users, 1);
4226 	nbuf->data = nbuf->head + NET_SKB_PAD;
4227 	skb_reset_tail_pointer(nbuf);
4228 }
4229 qdf_export_symbol(qdf_nbuf_init_fast);
4230 #endif /* WLAN_FEATURE_FASTPATH */
4231 
4232 
4233 #ifdef QDF_NBUF_GLOBAL_COUNT
4234 #ifdef WLAN_DEBUGFS
4235 /**
4236  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4237  *
4238  * Return: void
4239  */
4240 void __qdf_nbuf_mod_init(void)
4241 {
4242 	qdf_atomic_init(&nbuf_count);
4243 	qdf_debugfs_init();
4244 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4245 }
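
/*
 * Note (assumption: qdf_debugfs places nodes under the standard debugfs
 * root): after this init, the global nbuf count is readable from user
 * space via the NBUF_DEBUGFS_NAME node under /sys/kernel/debug, which
 * helps spot buffer leaks at runtime.
 */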
4246 
4247 /**
4248  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
4249  *
4250  * Return: void
4251  */
4252 void __qdf_nbuf_mod_exit(void)
4253 {
4254 	qdf_debugfs_exit();
4255 }
4256 
4257 #else
4258 
4259 void __qdf_nbuf_mod_init(void)
4260 {
4261 	qdf_atomic_init(&nbuf_count);
4262 }
4263 
4264 void __qdf_nbuf_mod_exit(void)
4265 {
4266 }
4267 
4268 #endif
4269 #endif
4270