xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework (QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
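/*
 * Note: the IEEE80211_RADIOTAP_VHT_BW_* values above are compact
 * bandwidth indices used while building the radiotap header, while
 * RADIOTAP_VHT_BW_* are the corresponding encodings of the radiotap
 * VHT "bandwidth" field (where 4 denotes 80 MHz and 11 denotes
 * 160 MHz).
 */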
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define FREQ_MULTIPLIER_CONST_5MHZ 5
88 #define FREQ_MULTIPLIER_CONST_20MHZ 20
89 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
90 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
91 #define RADIOTAP_CCK_CHANNEL 0x0020
92 #define RADIOTAP_OFDM_CHANNEL 0x0040
93 
94 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
95 #include <qdf_mc_timer.h>
96 
97 struct qdf_track_timer {
98 	qdf_mc_timer_t track_timer;
99 	qdf_atomic_t alloc_fail_cnt;
100 };
101 
102 static struct qdf_track_timer alloc_track_timer;
103 
104 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
105 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
106 #endif
107 
108 /* Packet Counter */
109 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
110 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
111 #ifdef QDF_NBUF_GLOBAL_COUNT
112 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
113 static qdf_atomic_t nbuf_count;
114 #endif
115 
116 #if defined(NBUF_MEMORY_DEBUG)
117 static bool is_initial_mem_debug_disabled;
118 #endif
119 
120 /**
121  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
122  *
123  * Return: none
124  */
125 void qdf_nbuf_tx_desc_count_display(void)
126 {
127 	qdf_debug("Current Snapshot of the Driver:");
128 	qdf_debug("Data Packets:");
129 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
130 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
131 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
132 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
140 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
141 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
146 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
147 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
148 	qdf_debug("Mgmt Packets:");
149 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
150 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
161 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
162 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
163 }
164 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
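/*
 * Note: each value printed above is a delta between adjacent layer
 * counters, i.e. the number of packets currently held at that layer.
 * For example, the "HTT" field prints (HTT count - HTC count): packets
 * handed to HTT that have not yet been passed on to HTC.
 */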
165 
166 /**
167  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
168  * @packet_type: packet type, either mgmt or data
169  * @current_state: layer at which the packet is currently present
170  *
171  * Return: none
172  */
173 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
174 			uint8_t current_state)
175 {
176 	switch (packet_type) {
177 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
178 		nbuf_tx_mgmt[current_state]++;
179 		break;
180 	case QDF_NBUF_TX_PKT_DATA_TRACK:
181 		nbuf_tx_data[current_state]++;
182 		break;
183 	default:
184 		break;
185 	}
186 }
187 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
188 
189 /**
190  * qdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
191  *
192  * Return: none
193  */
194 void qdf_nbuf_tx_desc_count_clear(void)
195 {
196 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
197 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
198 }
199 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
200 
201 /**
202  * qdf_nbuf_set_state() - Updates the packet state
203  * @nbuf: network buffer
204  * @current_state: layer at which the packet currently is
205  *
206  * This function updates the packet state to the layer at which the packet
207  * currently is
208  *
209  * Return: none
210  */
211 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
212 {
213 	/*
214 	 * Only Mgmt and Data packets are tracked. WMI messages
215 	 * such as scan commands are not tracked
216 	 */
217 	uint8_t packet_type;
218 
219 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
220 
221 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
222 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
223 		return;
224 	}
225 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
226 	qdf_nbuf_tx_desc_count_update(packet_type,
227 					current_state);
228 }
229 qdf_export_symbol(qdf_nbuf_set_state);
230 
231 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
232 /**
233  * __qdf_nbuf_start_replenish_timer() - Start the alloc-fail replenish timer
234  *
235  * This function starts the alloc fail replenish timer.
236  *
237  * Return: void
238  */
239 static void __qdf_nbuf_start_replenish_timer(void)
240 {
241 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
242 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
243 	    QDF_TIMER_STATE_RUNNING)
244 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
245 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
246 }
247 
248 /**
249  * __qdf_nbuf_stop_replenish_timer() - Stop the alloc-fail replenish timer
250  *
251  * This function stops the alloc fail replenish timer.
252  *
253  * Return: void
254  */
255 static void __qdf_nbuf_stop_replenish_timer(void)
256 {
257 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
258 		return;
259 
260 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
261 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
262 	    QDF_TIMER_STATE_RUNNING)
263 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
264 }
265 
266 /**
267  * qdf_replenish_expire_handler() - Replenish timer expiry handler
268  * @arg: opaque timer callback context (unused)
269  * This function triggers when the alloc fail replenish timer expires.
270  *
271  * Return: void
272  */
273 static void qdf_replenish_expire_handler(void *arg)
274 {
275 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
276 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
277 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
278 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
279 
280 		/* Error handling here */
281 	}
282 }
283 
284 /**
285  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
286  *
287  * This function initializes the nbuf alloc fail replenish timer.
288  *
289  * Return: void
290  */
291 void __qdf_nbuf_init_replenish_timer(void)
292 {
293 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
294 			  qdf_replenish_expire_handler, NULL);
295 }
296 
297 /**
298  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
299  *
300  * This function deinitializes the nbuf alloc fail replenish timer.
301  *
302  * Return: void
303  */
304 void __qdf_nbuf_deinit_replenish_timer(void)
305 {
306 	__qdf_nbuf_stop_replenish_timer();
307 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
308 }
309 #else
310 
311 static inline void __qdf_nbuf_start_replenish_timer(void) {}
312 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
313 #endif
314 
315 /* globals do not need to be initialized to NULL/0 */
316 qdf_nbuf_trace_update_t qdf_trace_update_cb;
317 qdf_nbuf_free_t nbuf_free_cb;
318 
319 #ifdef QDF_NBUF_GLOBAL_COUNT
320 
321 /**
322  * __qdf_nbuf_count_get() - get nbuf global count
323  *
324  * Return: nbuf global count
325  */
326 int __qdf_nbuf_count_get(void)
327 {
328 	return qdf_atomic_read(&nbuf_count);
329 }
330 qdf_export_symbol(__qdf_nbuf_count_get);
331 
332 /**
333  * __qdf_nbuf_count_inc() - increment nbuf global count
334  *
335  * @nbuf: sk buff
336  *
337  * Return: void
338  */
339 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
340 {
341 	int num_nbuf = 1;
342 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(nbuf);
343 
344 	/* Take care to account for frag_list */
345 	while (ext_list) {
346 		++num_nbuf;
347 		ext_list = qdf_nbuf_queue_next(ext_list);
348 	}
349 
350 	qdf_atomic_add(num_nbuf, &nbuf_count);
351 }
352 qdf_export_symbol(__qdf_nbuf_count_inc);
353 
354 /**
355  * __qdf_nbuf_count_dec() - decrement nbuf global count
356  *
357  * @nbuf: sk buff
358  *
359  * Return: void
360  */
361 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
362 {
363 	qdf_nbuf_t ext_list;
364 	int num_nbuf;
365 
366 	if (qdf_nbuf_get_users(nbuf) > 1)
367 		return;
368 
369 	num_nbuf = 1;
370 
371 	/* Take care to account for frag_list */
372 	ext_list = qdf_nbuf_get_ext_list(nbuf);
373 	while (ext_list) {
374 		if (qdf_nbuf_get_users(ext_list) == 1)
375 			++num_nbuf;
376 		ext_list = qdf_nbuf_queue_next(ext_list);
377 	}
378 
379 	qdf_atomic_sub(num_nbuf, &nbuf_count);
380 }
381 qdf_export_symbol(__qdf_nbuf_count_dec);
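/*
 * Worked example: an nbuf whose frag_list chains two extension nbufs
 * contributes 3 to nbuf_count in __qdf_nbuf_count_inc(). On the
 * decrement side the whole chain is skipped while other users still
 * hold the parent, and each extension is only subtracted once its own
 * user count has dropped to 1.
 */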
382 #endif
383 
384 #if defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
385 	!defined(QCA_WIFI_QCN9000)
386 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
387 				 int align, int prio, const char *func,
388 				 uint32_t line)
389 {
390 	struct sk_buff *skb;
391 	unsigned long offset;
392 	uint32_t lowmem_alloc_tries = 0;
393 
394 	if (align)
395 		size += (align - 1);
396 
397 realloc:
398 	skb = dev_alloc_skb(size);
399 
400 	if (skb)
401 		goto skb_alloc;
402 
403 	skb = pld_nbuf_pre_alloc(size);
404 
405 	if (!skb) {
406 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
407 				size, func, line);
408 		return NULL;
409 	}
410 
411 skb_alloc:
412 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040.
413 	 * Though we are trying to reserve low memory upfront to prevent this,
414 	 * we sometimes see SKBs allocated from low memory.
415 	 */
416 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
417 		lowmem_alloc_tries++;
418 		if (lowmem_alloc_tries > 100) {
419 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
420 				     size, func, line);
421 			return NULL;
422 		} else {
423 			/* Not freeing to make sure it
424 			 * will not get allocated again
425 			 */
426 			goto realloc;
427 		}
428 	}
429 	memset(skb->cb, 0x0, sizeof(skb->cb));
430 
431 	/*
432 	 * The default is for netbuf fragments to be interpreted
433 	 * as wordstreams rather than bytestreams.
434 	 */
435 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
436 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
437 
438 	/*
439 	 * XXX: how about we reserve first, then align?
440 	 * Align & make sure that the tail & data are adjusted properly
441 	 */
442 
443 	if (align) {
444 		offset = ((unsigned long)skb->data) % align;
445 		if (offset)
446 			skb_reserve(skb, align - offset);
447 	}
448 
449 	/*
450 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
451 	 * pointer
452 	 */
453 	skb_reserve(skb, reserve);
454 	qdf_nbuf_count_inc(skb);
455 
456 	return skb;
457 }
458 #else
459 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
460 				 int align, int prio, const char *func,
461 				 uint32_t line)
462 {
463 	struct sk_buff *skb;
464 	unsigned long offset;
465 	int flags = GFP_KERNEL;
466 
467 	if (align)
468 		size += (align - 1);
469 
470 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
471 		flags = GFP_ATOMIC;
472 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
473 		/*
474 		 * Observed that kcompactd burns CPU cycles to assemble order-3
475 		 * pages. __netdev_alloc_skb has a 4k page fallback just in case
476 		 * the high-order page allocation fails, so we don't need to push
477 		 * that hard. Let kcompactd rest in peace.
478 		 */
479 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
480 #endif
481 	}
482 
483 	skb = __netdev_alloc_skb(NULL, size, flags);
484 
485 	if (skb)
486 		goto skb_alloc;
487 
488 	skb = pld_nbuf_pre_alloc(size);
489 
490 	if (!skb) {
491 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
492 				size, func, line);
493 		__qdf_nbuf_start_replenish_timer();
494 		return NULL;
495 	} else {
496 		__qdf_nbuf_stop_replenish_timer();
497 	}
498 
499 skb_alloc:
500 	memset(skb->cb, 0x0, sizeof(skb->cb));
501 
502 	/*
503 	 * The default is for netbuf fragments to be interpreted
504 	 * as wordstreams rather than bytestreams.
505 	 */
506 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
507 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
508 
509 	/*
510 	 * XXX: how about we reserve first, then align?
511 	 * Align & make sure that the tail & data are adjusted properly
512 	 */
513 
514 	if (align) {
515 		offset = ((unsigned long)skb->data) % align;
516 		if (offset)
517 			skb_reserve(skb, align - offset);
518 	}
519 
520 	/*
521 	 * NOTE: alloc doesn't take responsibility if reserve unaligns the data
522 	 * pointer
523 	 */
524 	skb_reserve(skb, reserve);
525 	qdf_nbuf_count_inc(skb);
526 
527 	return skb;
528 }
529 #endif
530 qdf_export_symbol(__qdf_nbuf_alloc);
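/*
 * Usage sketch (illustrative only; callers normally go through the
 * qdf_nbuf_alloc() wrapper rather than calling this directly). The
 * 2048-byte size, 32-byte headroom and 4-byte alignment below are
 * arbitrary example values, not requirements of this API:
 *
 *	struct sk_buff *skb = __qdf_nbuf_alloc(osdev, 2048, 32, 4, 0,
 *					       __func__, __LINE__);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */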
531 
532 /**
533  * __qdf_nbuf_free() - free the nbuf; this variant is interrupt safe
534  * @skb: Pointer to network buffer
535  *
536  * Return: none
537  */
538 
539 void __qdf_nbuf_free(struct sk_buff *skb)
540 {
541 	if (pld_nbuf_pre_alloc_free(skb))
542 		return;
543 
544 	qdf_nbuf_count_dec(skb);
545 	if (nbuf_free_cb)
546 		nbuf_free_cb(skb);
547 	else
548 		dev_kfree_skb_any(skb);
549 }
550 
551 qdf_export_symbol(__qdf_nbuf_free);
552 
553 #ifdef NBUF_MEMORY_DEBUG
554 enum qdf_nbuf_event_type {
555 	QDF_NBUF_ALLOC,
556 	QDF_NBUF_ALLOC_CLONE,
557 	QDF_NBUF_ALLOC_COPY,
558 	QDF_NBUF_ALLOC_FAILURE,
559 	QDF_NBUF_FREE,
560 	QDF_NBUF_MAP,
561 	QDF_NBUF_UNMAP,
562 	QDF_NBUF_ALLOC_COPY_EXPAND,
563 };
564 
565 struct qdf_nbuf_event {
566 	qdf_nbuf_t nbuf;
567 	char func[QDF_MEM_FUNC_NAME_SIZE];
568 	uint32_t line;
569 	enum qdf_nbuf_event_type type;
570 	uint64_t timestamp;
571 };
572 
573 #define QDF_NBUF_HISTORY_SIZE 4096
574 static qdf_atomic_t qdf_nbuf_history_index;
575 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
576 
577 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
578 {
579 	int32_t next = qdf_atomic_inc_return(index);
580 
581 	if (next == size)
582 		qdf_atomic_sub(size, index);
583 
584 	return next % size;
585 }
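/*
 * Example: with size = QDF_NBUF_HISTORY_SIZE (4096), successive calls
 * return 1, 2, ..., 4095, 0, 1, ... The subtraction rebases the shared
 * index once it reaches size, and the final "% size" covers the short
 * window in which concurrent callers may have advanced the index past
 * size before the rebase lands.
 */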
586 
587 static void
588 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
589 		     enum qdf_nbuf_event_type type)
590 {
591 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
592 						   QDF_NBUF_HISTORY_SIZE);
593 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
594 
595 	event->nbuf = nbuf;
596 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
597 	event->line = line;
598 	event->type = type;
599 	event->timestamp = qdf_get_log_timestamp();
600 }
601 #endif /* NBUF_MEMORY_DEBUG */
602 
603 #ifdef NBUF_MAP_UNMAP_DEBUG
604 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
605 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
606 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
607 
608 static void qdf_nbuf_map_tracking_init(void)
609 {
610 	qdf_tracker_init(&qdf_nbuf_map_tracker);
611 }
612 
613 static void qdf_nbuf_map_tracking_deinit(void)
614 {
615 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
616 }
617 
618 static QDF_STATUS
619 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
620 {
621 	QDF_STATUS status;
622 
623 	if (is_initial_mem_debug_disabled)
624 		return QDF_STATUS_SUCCESS;
625 
626 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
627 	if (QDF_IS_STATUS_ERROR(status))
628 		return status;
629 
630 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
631 
632 	return QDF_STATUS_SUCCESS;
633 }
634 
635 static void
636 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
637 {
638 	if (is_initial_mem_debug_disabled)
639 		return;
640 
641 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
642 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
643 }
644 
645 void qdf_nbuf_map_check_for_leaks(void)
646 {
647 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
648 }
649 
650 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
651 			      qdf_nbuf_t buf,
652 			      qdf_dma_dir_t dir,
653 			      const char *func,
654 			      uint32_t line)
655 {
656 	QDF_STATUS status;
657 
658 	status = qdf_nbuf_track_map(buf, func, line);
659 	if (QDF_IS_STATUS_ERROR(status))
660 		return status;
661 
662 	status = __qdf_nbuf_map(osdev, buf, dir);
663 	if (QDF_IS_STATUS_ERROR(status))
664 		qdf_nbuf_untrack_map(buf, func, line);
665 
666 	return status;
667 }
668 
669 qdf_export_symbol(qdf_nbuf_map_debug);
670 
671 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
672 			  qdf_nbuf_t buf,
673 			  qdf_dma_dir_t dir,
674 			  const char *func,
675 			  uint32_t line)
676 {
677 	qdf_nbuf_untrack_map(buf, func, line);
678 	__qdf_nbuf_unmap_single(osdev, buf, dir);
679 }
680 
681 qdf_export_symbol(qdf_nbuf_unmap_debug);
682 
683 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
684 				     qdf_nbuf_t buf,
685 				     qdf_dma_dir_t dir,
686 				     const char *func,
687 				     uint32_t line)
688 {
689 	QDF_STATUS status;
690 
691 	status = qdf_nbuf_track_map(buf, func, line);
692 	if (QDF_IS_STATUS_ERROR(status))
693 		return status;
694 
695 	status = __qdf_nbuf_map_single(osdev, buf, dir);
696 	if (QDF_IS_STATUS_ERROR(status))
697 		qdf_nbuf_untrack_map(buf, func, line);
698 
699 	return status;
700 }
701 
702 qdf_export_symbol(qdf_nbuf_map_single_debug);
703 
704 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
705 				 qdf_nbuf_t buf,
706 				 qdf_dma_dir_t dir,
707 				 const char *func,
708 				 uint32_t line)
709 {
710 	qdf_nbuf_untrack_map(buf, func, line);
711 	__qdf_nbuf_unmap_single(osdev, buf, dir);
712 }
713 
714 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
715 
716 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
717 				     qdf_nbuf_t buf,
718 				     qdf_dma_dir_t dir,
719 				     int nbytes,
720 				     const char *func,
721 				     uint32_t line)
722 {
723 	QDF_STATUS status;
724 
725 	status = qdf_nbuf_track_map(buf, func, line);
726 	if (QDF_IS_STATUS_ERROR(status))
727 		return status;
728 
729 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
730 	if (QDF_IS_STATUS_ERROR(status))
731 		qdf_nbuf_untrack_map(buf, func, line);
732 
733 	return status;
734 }
735 
736 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
737 
738 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
739 				 qdf_nbuf_t buf,
740 				 qdf_dma_dir_t dir,
741 				 int nbytes,
742 				 const char *func,
743 				 uint32_t line)
744 {
745 	qdf_nbuf_untrack_map(buf, func, line);
746 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
747 }
748 
749 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
750 
751 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
752 					    qdf_nbuf_t buf,
753 					    qdf_dma_dir_t dir,
754 					    int nbytes,
755 					    const char *func,
756 					    uint32_t line)
757 {
758 	QDF_STATUS status;
759 
760 	status = qdf_nbuf_track_map(buf, func, line);
761 	if (QDF_IS_STATUS_ERROR(status))
762 		return status;
763 
764 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
765 	if (QDF_IS_STATUS_ERROR(status))
766 		qdf_nbuf_untrack_map(buf, func, line);
767 
768 	return status;
769 }
770 
771 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
772 
773 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
774 					qdf_nbuf_t buf,
775 					qdf_dma_dir_t dir,
776 					int nbytes,
777 					const char *func,
778 					uint32_t line)
779 {
780 	qdf_nbuf_untrack_map(buf, func, line);
781 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
782 }
783 
784 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
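/*
 * Usage sketch (illustrative): every qdf_nbuf_map_*_debug() call must
 * be balanced by the matching qdf_nbuf_unmap_*_debug() variant before
 * the nbuf is freed; otherwise qdf_nbuf_panic_on_free_if_mapped()
 * below reports the leaked mapping along with the call site that
 * created it:
 *
 *	status = qdf_nbuf_map_single_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *					   __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...
 *	qdf_nbuf_unmap_single_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				    __func__, __LINE__);
 */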
785 
786 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
787 					     const char *func,
788 					     uint32_t line)
789 {
790 	char map_func[QDF_TRACKER_FUNC_SIZE];
791 	uint32_t map_line;
792 
793 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
794 				&map_func, &map_line))
795 		return;
796 
797 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
798 			   func, line, map_func, map_line);
799 }
800 #else
801 static inline void qdf_nbuf_map_tracking_init(void)
802 {
803 }
804 
805 static inline void qdf_nbuf_map_tracking_deinit(void)
806 {
807 }
808 
809 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
810 						    const char *func,
811 						    uint32_t line)
812 {
813 }
814 #endif /* NBUF_MAP_UNMAP_DEBUG */
815 
816 /**
817  * __qdf_nbuf_map() - map a buffer to local bus address space
818  * @osdev: OS device
819  * @skb: Pointer to network buffer
820  * @dir: DMA direction
822  *
823  * Return: QDF_STATUS
824  */
825 #ifdef QDF_OS_DEBUG
826 QDF_STATUS
827 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
828 {
829 	struct skb_shared_info *sh = skb_shinfo(skb);
830 
831 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
832 			|| (dir == QDF_DMA_FROM_DEVICE));
833 
834 	/*
835 	 * Assume there's only a single fragment.
836 	 * To support multiple fragments, it would be necessary to change
837 	 * qdf_nbuf_t to be a separate object that stores meta-info
838 	 * (including the bus address for each fragment) and a pointer
839 	 * to the underlying sk_buff.
840 	 */
841 	qdf_assert(sh->nr_frags == 0);
842 
843 	return __qdf_nbuf_map_single(osdev, skb, dir);
844 }
845 qdf_export_symbol(__qdf_nbuf_map);
846 
847 #else
848 QDF_STATUS
849 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
850 {
851 	return __qdf_nbuf_map_single(osdev, skb, dir);
852 }
853 qdf_export_symbol(__qdf_nbuf_map);
854 #endif
855 /**
856  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
857  * @osdev: OS device
858  * @skb: Pointer to network buffer
859  * @dir: DMA direction
860  *
861  * Return: none
862  */
863 void
864 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
865 			qdf_dma_dir_t dir)
866 {
867 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
868 		   || (dir == QDF_DMA_FROM_DEVICE));
869 
870 	/*
871 	 * Assume there's a single fragment.
872 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
873 	 */
874 	__qdf_nbuf_unmap_single(osdev, skb, dir);
875 }
876 qdf_export_symbol(__qdf_nbuf_unmap);
877 
878 /**
879  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
880  * @osdev: OS device
881  * @skb: Pointer to network buffer
882  * @dir: Direction
883  *
884  * Return: QDF_STATUS
885  */
886 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
887 QDF_STATUS
888 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
889 {
890 	qdf_dma_addr_t paddr;
891 
892 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
893 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
894 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
895 	return QDF_STATUS_SUCCESS;
896 }
897 qdf_export_symbol(__qdf_nbuf_map_single);
898 #else
899 QDF_STATUS
900 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
901 {
902 	qdf_dma_addr_t paddr;
903 
904 	/* assume that the OS only provides a single fragment */
905 	QDF_NBUF_CB_PADDR(buf) = paddr =
906 		dma_map_single(osdev->dev, buf->data,
907 				skb_end_pointer(buf) - buf->data,
908 				__qdf_dma_dir_to_os(dir));
909 	return dma_mapping_error(osdev->dev, paddr)
910 		? QDF_STATUS_E_FAILURE
911 		: QDF_STATUS_SUCCESS;
912 }
913 qdf_export_symbol(__qdf_nbuf_map_single);
914 #endif
915 /**
916  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
917  * @osdev: OS device
918  * @skb: Pointer to network buffer
919  * @dir: Direction
920  *
921  * Return: none
922  */
923 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
924 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
925 				qdf_dma_dir_t dir)
926 {
927 }
928 #else
929 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
930 					qdf_dma_dir_t dir)
931 {
932 	if (QDF_NBUF_CB_PADDR(buf))
933 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
934 			skb_end_pointer(buf) - buf->data,
935 			__qdf_dma_dir_to_os(dir));
936 }
937 #endif
938 qdf_export_symbol(__qdf_nbuf_unmap_single);
939 
940 /**
941  * __qdf_nbuf_set_rx_cksum() - set rx checksum
942  * @skb: Pointer to network buffer
943  * @cksum: Pointer to checksum value
944  *
945  * Return: QDF_STATUS
946  */
947 QDF_STATUS
948 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
949 {
950 	switch (cksum->l4_result) {
951 	case QDF_NBUF_RX_CKSUM_NONE:
952 		skb->ip_summed = CHECKSUM_NONE;
953 		break;
954 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
955 		skb->ip_summed = CHECKSUM_UNNECESSARY;
956 		break;
957 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
958 		skb->ip_summed = CHECKSUM_PARTIAL;
959 		skb->csum = cksum->val;
960 		break;
961 	default:
962 		pr_err("Unknown checksum type\n");
963 		qdf_assert(0);
964 		return QDF_STATUS_E_NOSUPPORT;
965 	}
966 	return QDF_STATUS_SUCCESS;
967 }
968 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
969 
970 /**
971  * __qdf_nbuf_get_tx_cksum() - get tx checksum
972  * @skb: Pointer to network buffer
973  *
974  * Return: TX checksum value
975  */
976 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
977 {
978 	switch (skb->ip_summed) {
979 	case CHECKSUM_NONE:
980 		return QDF_NBUF_TX_CKSUM_NONE;
981 	case CHECKSUM_PARTIAL:
982 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
983 	case CHECKSUM_COMPLETE:
984 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
985 	default:
986 		return QDF_NBUF_TX_CKSUM_NONE;
987 	}
988 }
989 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
990 
991 /**
992  * __qdf_nbuf_get_tid() - get tid
993  * @skb: Pointer to network buffer
994  *
995  * Return: tid
996  */
997 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
998 {
999 	return skb->priority;
1000 }
1001 qdf_export_symbol(__qdf_nbuf_get_tid);
1002 
1003 /**
1004  * __qdf_nbuf_set_tid() - set tid
1005  * @skb: Pointer to network buffer
1006  * @tid: TID value to set
1007  * Return: none
1008  */
1009 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1010 {
1011 	skb->priority = tid;
1012 }
1013 qdf_export_symbol(__qdf_nbuf_set_tid);
1014 
1015 /**
1016  * __qdf_nbuf_get_exemption_type() - get exemption type
1017  * @skb: Pointer to network buffer
1018  *
1019  * Return: exemption type
1020  */
1021 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1022 {
1023 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1024 }
1025 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1026 
1027 /**
1028  * __qdf_nbuf_reg_trace_cb() - register trace callback
1029  * @cb_func_ptr: Pointer to trace callback function
1030  *
1031  * Return: none
1032  */
1033 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1034 {
1035 	qdf_trace_update_cb = cb_func_ptr;
1036 }
1037 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1038 
1039 /**
1040  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1041  *              of DHCP packet.
1042  * @data: Pointer to DHCP packet data buffer
1043  *
1044  * This func. returns the subtype of DHCP packet.
1045  *
1046  * Return: subtype of the DHCP packet.
1047  */
1048 enum qdf_proto_subtype
1049 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1050 {
1051 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1052 
1053 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1054 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1055 					QDF_DHCP_OPTION53_LENGTH)) {
1056 
1057 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1058 		case QDF_DHCP_DISCOVER:
1059 			subtype = QDF_PROTO_DHCP_DISCOVER;
1060 			break;
1061 		case QDF_DHCP_REQUEST:
1062 			subtype = QDF_PROTO_DHCP_REQUEST;
1063 			break;
1064 		case QDF_DHCP_OFFER:
1065 			subtype = QDF_PROTO_DHCP_OFFER;
1066 			break;
1067 		case QDF_DHCP_ACK:
1068 			subtype = QDF_PROTO_DHCP_ACK;
1069 			break;
1070 		case QDF_DHCP_NAK:
1071 			subtype = QDF_PROTO_DHCP_NACK;
1072 			break;
1073 		case QDF_DHCP_RELEASE:
1074 			subtype = QDF_PROTO_DHCP_RELEASE;
1075 			break;
1076 		case QDF_DHCP_INFORM:
1077 			subtype = QDF_PROTO_DHCP_INFORM;
1078 			break;
1079 		case QDF_DHCP_DECLINE:
1080 			subtype = QDF_PROTO_DHCP_DECLINE;
1081 			break;
1082 		default:
1083 			break;
1084 		}
1085 	}
1086 
1087 	return subtype;
1088 }
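/*
 * Background note: DHCP option 53 is the "DHCP Message Type" option
 * (RFC 2132); its single-byte value is what the switch above maps to
 * the QDF_PROTO_DHCP_* subtypes, and the length check guards against
 * matching option 53 with an unexpected option length.
 */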
1089 
1090 /**
1091  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1092  *            of EAPOL packet.
1093  * @data: Pointer to EAPOL packet data buffer
1094  *
1095  * This func. returns the subtype of EAPOL packet.
1096  *
1097  * Return: subtype of the EAPOL packet.
1098  */
1099 enum qdf_proto_subtype
1100 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1101 {
1102 	uint16_t eapol_key_info;
1103 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1104 	uint16_t mask;
1105 
1106 	eapol_key_info = (uint16_t)(*(uint16_t *)
1107 			(data + EAPOL_KEY_INFO_OFFSET));
1108 
1109 	mask = eapol_key_info & EAPOL_MASK;
1110 	switch (mask) {
1111 	case EAPOL_M1_BIT_MASK:
1112 		subtype = QDF_PROTO_EAPOL_M1;
1113 		break;
1114 	case EAPOL_M2_BIT_MASK:
1115 		subtype = QDF_PROTO_EAPOL_M2;
1116 		break;
1117 	case EAPOL_M3_BIT_MASK:
1118 		subtype = QDF_PROTO_EAPOL_M3;
1119 		break;
1120 	case EAPOL_M4_BIT_MASK:
1121 		subtype = QDF_PROTO_EAPOL_M4;
1122 		break;
1123 	default:
1124 		break;
1125 	}
1126 
1127 	return subtype;
1128 }
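/*
 * Background note: the four messages of the EAPOL 4-way handshake are
 * distinguished by bits of the Key Information field (Key MIC, Secure,
 * Key ACK, etc.), so masking key-info with EAPOL_MASK yields a
 * distinct pattern per message for the switch above to match against
 * EAPOL_M1..M4_BIT_MASK.
 */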
1129 
1130 /**
1131  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1132  *            of ARP packet.
1133  * @data: Pointer to ARP packet data buffer
1134  *
1135  * This func. returns the subtype of ARP packet.
1136  *
1137  * Return: subtype of the ARP packet.
1138  */
1139 enum qdf_proto_subtype
1140 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1141 {
1142 	uint16_t subtype;
1143 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1144 
1145 	subtype = (uint16_t)(*(uint16_t *)
1146 			(data + ARP_SUB_TYPE_OFFSET));
1147 
1148 	switch (QDF_SWAP_U16(subtype)) {
1149 	case ARP_REQUEST:
1150 		proto_subtype = QDF_PROTO_ARP_REQ;
1151 		break;
1152 	case ARP_RESPONSE:
1153 		proto_subtype = QDF_PROTO_ARP_RES;
1154 		break;
1155 	default:
1156 		break;
1157 	}
1158 
1159 	return proto_subtype;
1160 }
1161 
1162 /**
1163  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1164  *            of IPV4 ICMP packet.
1165  * @data: Pointer to IPV4 ICMP packet data buffer
1166  *
1167  * This func. returns the subtype of ICMP packet.
1168  *
1169  * Return: subtype of the ICMP packet.
1170  */
1171 enum qdf_proto_subtype
1172 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1173 {
1174 	uint8_t subtype;
1175 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1176 
1177 	subtype = (uint8_t)(*(uint8_t *)
1178 			(data + ICMP_SUBTYPE_OFFSET));
1179 
1180 	switch (subtype) {
1181 	case ICMP_REQUEST:
1182 		proto_subtype = QDF_PROTO_ICMP_REQ;
1183 		break;
1184 	case ICMP_RESPONSE:
1185 		proto_subtype = QDF_PROTO_ICMP_RES;
1186 		break;
1187 	default:
1188 		break;
1189 	}
1190 
1191 	return proto_subtype;
1192 }
1193 
1194 /**
1195  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1196  *            of IPV6 ICMPV6 packet.
1197  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1198  *
1199  * This func. returns the subtype of ICMPV6 packet.
1200  *
1201  * Return: subtype of the ICMPV6 packet.
1202  */
1203 enum qdf_proto_subtype
1204 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1205 {
1206 	uint8_t subtype;
1207 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1208 
1209 	subtype = (uint8_t)(*(uint8_t *)
1210 			(data + ICMPV6_SUBTYPE_OFFSET));
1211 
1212 	switch (subtype) {
1213 	case ICMPV6_REQUEST:
1214 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1215 		break;
1216 	case ICMPV6_RESPONSE:
1217 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1218 		break;
1219 	case ICMPV6_RS:
1220 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1221 		break;
1222 	case ICMPV6_RA:
1223 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1224 		break;
1225 	case ICMPV6_NS:
1226 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1227 		break;
1228 	case ICMPV6_NA:
1229 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1230 		break;
1231 	default:
1232 		break;
1233 	}
1234 
1235 	return proto_subtype;
1236 }
1237 
1238 /**
1239  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1240  *            of IPV4 packet.
1241  * @data: Pointer to IPV4 packet data buffer
1242  *
1243  * This func. returns the proto type of IPV4 packet.
1244  *
1245  * Return: proto type of IPV4 packet.
1246  */
1247 uint8_t
1248 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1249 {
1250 	uint8_t proto_type;
1251 
1252 	proto_type = (uint8_t)(*(uint8_t *)(data +
1253 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1254 	return proto_type;
1255 }
1256 
1257 /**
1258  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1259  *            of IPV6 packet.
1260  * @data: Pointer to IPV6 packet data buffer
1261  *
1262  * This func. returns the proto type of IPV6 packet.
1263  *
1264  * Return: proto type of IPV6 packet.
1265  */
1266 uint8_t
1267 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1268 {
1269 	uint8_t proto_type;
1270 
1271 	proto_type = (uint8_t)(*(uint8_t *)(data +
1272 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1273 	return proto_type;
1274 }
1275 
1276 /**
1277  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1278  * @data: Pointer to network data
1279  *
1280  * This api is for Tx packets.
1281  *
1282  * Return: true if packet is ipv4 packet
1283  *	   false otherwise
1284  */
1285 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1286 {
1287 	uint16_t ether_type;
1288 
1289 	ether_type = (uint16_t)(*(uint16_t *)(data +
1290 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1291 
1292 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1293 		return true;
1294 	else
1295 		return false;
1296 }
1297 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1298 
1299 /**
1300  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1301  * @data: Pointer to network data buffer
1302  *
1303  * This api is for ipv4 packet.
1304  *
1305  * Return: true if packet is DHCP packet
1306  *	   false otherwise
1307  */
1308 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1309 {
1310 	uint16_t sport;
1311 	uint16_t dport;
1312 
1313 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1314 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1315 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1316 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1317 					 sizeof(uint16_t)));
1318 
1319 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1320 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1321 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1322 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1323 		return true;
1324 	else
1325 		return false;
1326 }
1327 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1328 
1329 /**
1330  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1331  * @data: Pointer to network data buffer
1332  *
1333  * This api is for ipv4 packet.
1334  *
1335  * Return: true if packet is EAPOL packet
1336  *	   false otherwise.
1337  */
1338 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1339 {
1340 	uint16_t ether_type;
1341 
1342 	ether_type = (uint16_t)(*(uint16_t *)(data +
1343 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1344 
1345 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1346 		return true;
1347 	else
1348 		return false;
1349 }
1350 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1351 
1352 /**
1353  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1354  * @skb: Pointer to network buffer
1355  *
1356  * This api is for ipv4 packet.
1357  *
1358  * Return: true if packet is WAPI packet
1359  *	   false otherwise.
1360  */
1361 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1362 {
1363 	uint16_t ether_type;
1364 
1365 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1366 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1367 
1368 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1369 		return true;
1370 	else
1371 		return false;
1372 }
1373 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1374 
1375 /**
1376  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1377  * @skb: Pointer to network buffer
1378  *
1379  * This api is for ipv4 packet.
1380  *
1381  * Return: true if packet is tdls packet
1382  *	   false otherwise.
1383  */
1384 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1385 {
1386 	uint16_t ether_type;
1387 
1388 	ether_type = *(uint16_t *)(skb->data +
1389 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1390 
1391 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1392 		return true;
1393 	else
1394 		return false;
1395 }
1396 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1397 
1398 /**
1399  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1400  * @data: Pointer to network data buffer
1401  *
1402  * This api is for ipv4 packet.
1403  *
1404  * Return: true if packet is ARP packet
1405  *	   false otherwise.
1406  */
1407 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1408 {
1409 	uint16_t ether_type;
1410 
1411 	ether_type = (uint16_t)(*(uint16_t *)(data +
1412 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1413 
1414 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1415 		return true;
1416 	else
1417 		return false;
1418 }
1419 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1420 
1421 /**
1422  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1423  * @data: Pointer to network data buffer
1424  *
1425  * This api is for ipv4 packet.
1426  *
1427  * Return: true if packet is ARP request
1428  *	   false otherwise.
1429  */
1430 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1431 {
1432 	uint16_t op_code;
1433 
1434 	op_code = (uint16_t)(*(uint16_t *)(data +
1435 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1436 
1437 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1438 		return true;
1439 	return false;
1440 }
1441 
1442 /**
1443  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1444  * @data: Pointer to network data buffer
1445  *
1446  * This api is for ipv4 packet.
1447  *
1448  * Return: true if packet is ARP response
1449  *	   false otherwise.
1450  */
1451 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1452 {
1453 	uint16_t op_code;
1454 
1455 	op_code = (uint16_t)(*(uint16_t *)(data +
1456 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1457 
1458 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1459 		return true;
1460 	return false;
1461 }
1462 
1463 /**
1464  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1465  * @data: Pointer to network data buffer
1466  *
1467  * This api is for ipv4 packet.
1468  *
1469  * Return: ARP packet source IP value.
1470  */
1471 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1472 {
1473 	uint32_t src_ip;
1474 
1475 	src_ip = (uint32_t)(*(uint32_t *)(data +
1476 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1477 
1478 	return src_ip;
1479 }
1480 
1481 /**
1482  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1483  * @data: Pointer to network data buffer
1484  *
1485  * This api is for ipv4 packet.
1486  *
1487  * Return: ARP packet target IP value.
1488  */
1489 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1490 {
1491 	uint32_t tgt_ip;
1492 
1493 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1494 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1495 
1496 	return tgt_ip;
1497 }
1498 
1499 /**
1500  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1501  * @data: Pointer to network data buffer
1502  * @len: length to copy
1503  *
1504  * This api is for dns domain name
1505  *
1506  * Return: dns domain name.
1507  */
1508 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1509 {
1510 	uint8_t *domain_name;
1511 
1512 	domain_name = (uint8_t *)
1513 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1514 	return domain_name;
1515 }
1516 
1517 
1518 /**
1519  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1520  * @data: Pointer to network data buffer
1521  *
1522  * This api is for dns query packet.
1523  *
1524  * Return: true if packet is dns query packet.
1525  *	   false otherwise.
1526  */
1527 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1528 {
1529 	uint16_t op_code;
1530 	uint16_t tgt_port;
1531 
1532 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1533 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1534 	/* Standard DNS query always happens on Dest Port 53. */
1535 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1536 		op_code = (uint16_t)(*(uint16_t *)(data +
1537 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1538 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1539 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1540 			return true;
1541 	}
1542 	return false;
1543 }
1544 
1545 /**
1546  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1547  * @data: Pointer to network data buffer
1548  *
1549  * This api is for dns query response.
1550  *
1551  * Return: true if packet is dns response packet.
1552  *	   false otherwise.
1553  */
1554 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1555 {
1556 	uint16_t op_code;
1557 	uint16_t src_port;
1558 
1559 	src_port = (uint16_t)(*(uint16_t *)(data +
1560 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1561 	/* Standard DNS response always comes on Src Port 53. */
1562 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1563 		op_code = (uint16_t)(*(uint16_t *)(data +
1564 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1565 
1566 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1567 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1568 			return true;
1569 	}
1570 	return false;
1571 }
1572 
1573 /**
1574  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1575  * @data: Pointer to network data buffer
1576  *
1577  * This api is for tcp syn packet.
1578  *
1579  * Return: true if packet is tcp syn packet.
1580  *	   false otherwise.
1581  */
1582 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1583 {
1584 	uint8_t op_code;
1585 
1586 	op_code = (uint8_t)(*(uint8_t *)(data +
1587 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1588 
1589 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1590 		return true;
1591 	return false;
1592 }
1593 
1594 /**
1595  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1596  * @data: Pointer to network data buffer
1597  *
1598  * This api is for tcp syn ack packet.
1599  *
1600  * Return: true if packet is tcp syn ack packet.
1601  *	   false otherwise.
1602  */
1603 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1604 {
1605 	uint8_t op_code;
1606 
1607 	op_code = (uint8_t)(*(uint8_t *)(data +
1608 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1609 
1610 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1611 		return true;
1612 	return false;
1613 }
1614 
1615 /**
1616  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1617  * @data: Pointer to network data buffer
1618  *
1619  * This api is for tcp ack packet.
1620  *
1621  * Return: true if packet is tcp ack packet.
1622  *	   false otherwise.
1623  */
1624 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1625 {
1626 	uint8_t op_code;
1627 
1628 	op_code = (uint8_t)(*(uint8_t *)(data +
1629 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1630 
1631 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1632 		return true;
1633 	return false;
1634 }
1635 
1636 /**
1637  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1638  * @data: Pointer to network data buffer
1639  *
1640  * This api is for tcp packet.
1641  *
1642  * Return: tcp source port value.
1643  */
1644 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1645 {
1646 	uint16_t src_port;
1647 
1648 	src_port = (uint16_t)(*(uint16_t *)(data +
1649 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1650 
1651 	return src_port;
1652 }
1653 
1654 /**
1655  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1656  * @data: Pointer to network data buffer
1657  *
1658  * This api is for tcp packet.
1659  *
1660  * Return: tcp destination port value.
1661  */
1662 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1663 {
1664 	uint16_t tgt_port;
1665 
1666 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1667 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1668 
1669 	return tgt_port;
1670 }
1671 
1672 /**
1673  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1674  * @data: Pointer to network data buffer
1675  *
1676  * This api is for ipv4 req packet.
1677  *
1678  * Return: true if packet is icmpv4 request
1679  *	   false otherwise.
1680  */
1681 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1682 {
1683 	uint8_t op_code;
1684 
1685 	op_code = (uint8_t)(*(uint8_t *)(data +
1686 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1687 
1688 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1689 		return true;
1690 	return false;
1691 }
1692 
1693 /**
1694  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1695  * @data: Pointer to network data buffer
1696  *
1697  * This api is for ipv4 res packet.
1698  *
1699  * Return: true if packet is icmpv4 response
1700  *	   false otherwise.
1701  */
1702 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1703 {
1704 	uint8_t op_code;
1705 
1706 	op_code = (uint8_t)(*(uint8_t *)(data +
1707 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1708 
1709 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1710 		return true;
1711 	return false;
1712 }
1713 
1714 /**
1715  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1716  * @data: Pointer to network data buffer
1717  *
1718  * This api is for ipv4 packet.
1719  *
1720  * Return: icmpv4 packet source IP value.
1721  */
1722 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1723 {
1724 	uint32_t src_ip;
1725 
1726 	src_ip = (uint32_t)(*(uint32_t *)(data +
1727 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1728 
1729 	return src_ip;
1730 }
1731 
1732 /**
1733  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1734  * @data: Pointer to network data buffer
1735  *
1736  * This api is for ipv4 packet.
1737  *
1738  * Return: icmpv4 packet target IP value.
1739  */
1740 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1741 {
1742 	uint32_t tgt_ip;
1743 
1744 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1745 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1746 
1747 	return tgt_ip;
1748 }
1749 
1750 
1751 /**
1752  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1753  * @data: Pointer to IPV6 packet data buffer
1754  *
1755  * This func. checks whether it is an IPV6 packet or not.
1756  *
1757  * Return: TRUE if it is an IPV6 packet
1758  *         FALSE if not
1759  */
1760 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1761 {
1762 	uint16_t ether_type;
1763 
1764 	ether_type = (uint16_t)(*(uint16_t *)(data +
1765 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1766 
1767 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1768 		return true;
1769 	else
1770 		return false;
1771 }
1772 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1773 
1774 /**
1775  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1776  * @data: Pointer to network data buffer
1777  *
1778  * This api is for ipv6 packet.
1779  *
1780  * Return: true if packet is DHCP packet
1781  *	   false otherwise
1782  */
1783 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1784 {
1785 	uint16_t sport;
1786 	uint16_t dport;
1787 
1788 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1789 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1790 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1791 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1792 					sizeof(uint16_t));
1793 
1794 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1795 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1796 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1797 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1798 		return true;
1799 	else
1800 		return false;
1801 }
1802 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1803 
1804 /**
1805  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is an mdns packet
1806  * @data: Pointer to network data buffer
1807  *
1808  * This api is for ipv6 packet.
1809  *
1810  * Return: true if packet is MDNS packet
1811  *	   false otherwise
1812  */
1813 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
1814 {
1815 	uint16_t sport;
1816 	uint16_t dport;
1817 
1818 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1819 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1820 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1821 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1822 					sizeof(uint16_t));
1823 
1824 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
1825 	    dport == sport)
1826 		return true;
1827 	else
1828 		return false;
1829 }
1830 
1831 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
1832 
1833 /**
1834  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
1835  * @data: Pointer to IPV4 packet data buffer
1836  *
1837  * This func. checks whether it is an IPV4 multicast packet or not.
1838  *
1839  * Return: TRUE if it is an IPV4 multicast packet
1840  *         FALSE if not
1841  */
1842 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1843 {
1844 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1845 		uint32_t *dst_addr =
1846 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1847 
1848 		/*
1849 		 * Check the top nibble of the IPv4 destination address; if it
1850 		 * equals 0xE, the address is multicast (224.0.0.0/4).
1851 		 */
1852 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1853 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1854 			return true;
1855 		else
1856 			return false;
1857 	} else
1858 		return false;
1859 }
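/*
 * Worked example: addresses in 224.0.0.0/4 (e.g. 224.0.0.251, the
 * IPv4 mDNS group) carry 0xE in the top nibble of the first octet and
 * are classified as multicast here, while a unicast address such as
 * 192.168.1.1 is not.
 */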
1860 
1861 /**
1862  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
1863  * @data: Pointer to IPV6 packet data buffer
1864  *
1865  * This func. checks whether it is an IPV6 multicast packet or not.
1866  *
1867  * Return: TRUE if it is an IPV6 multicast packet
1868  *         FALSE if not
1869  */
1870 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1871 {
1872 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1873 		uint16_t *dst_addr;
1874 
1875 		dst_addr = (uint16_t *)
1876 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1877 
1878 		/*
1879 		 * Check the first 16 bits of the destination address; if they
1880 		 * equal 0xFF00, it is an IPV6 mcast packet.
1881 		 */
1882 		if (*dst_addr ==
1883 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1884 			return true;
1885 		else
1886 			return false;
1887 	} else
1888 		return false;
1889 }
1890 
1891 /**
1892  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
1893  * @data: Pointer to IPV4 ICMP packet data buffer
1894  *
1895  * This func. checks whether it is an ICMP packet or not.
1896  *
1897  * Return: TRUE if it is an ICMP packet
1898  *         FALSE if not
1899  */
1900 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1901 {
1902 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1903 		uint8_t pkt_type;
1904 
1905 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1906 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1907 
1908 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1909 			return true;
1910 		else
1911 			return false;
1912 	} else
1913 		return false;
1914 }
1915 
1916 /**
1917  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
1918  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1919  *
1920  * This func. checks whether it is an ICMPV6 packet or not.
1921  *
1922  * Return: TRUE if it is an ICMPV6 packet
1923  *         FALSE if not
1924  */
1925 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1926 {
1927 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1928 		uint8_t pkt_type;
1929 
1930 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1931 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1932 
1933 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1934 			return true;
1935 		else
1936 			return false;
1937 	} else
1938 		return false;
1939 }
1940 
1941 /**
1942  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet.
1943  * @data: Pointer to IPV4 UDP packet data buffer
1944  *
1945  * This func. checks whether it is an IPV4 UDP packet or not.
1946  *
1947  * Return: TRUE if it is an IPV4 UDP packet
1948  *         FALSE if not
1949  */
1950 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1951 {
1952 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1953 		uint8_t pkt_type;
1954 
1955 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1956 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1957 
1958 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1959 			return true;
1960 		else
1961 			return false;
1962 	} else
1963 		return false;
1964 }
1965 
1966 /**
1967  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is an IPV4 TCP packet.
1968  * @data: Pointer to IPV4 TCP packet data buffer
1969  *
1970  * This func. checks whether it is an IPV4 TCP packet or not.
1971  *
1972  * Return: TRUE if it is an IPV4 TCP packet
1973  *         FALSE if not
1974  */
1975 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1976 {
1977 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1978 		uint8_t pkt_type;
1979 
1980 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1981 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1982 
1983 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1984 			return true;
1985 		else
1986 			return false;
1987 	} else
1988 		return false;
1989 }
1990 
1991 /**
1992  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is an IPV6 UDP packet.
1993  * @data: Pointer to IPV6 UDP packet data buffer
1994  *
1995  * This func. checks whether it is an IPV6 UDP packet or not.
1996  *
1997  * Return: TRUE if it is an IPV6 UDP packet
1998  *         FALSE if not
1999  */
2000 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2001 {
2002 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2003 		uint8_t pkt_type;
2004 
2005 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2006 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2007 
2008 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2009 			return true;
2010 		else
2011 			return false;
2012 	} else
2013 		return false;
2014 }
2015 
2016 /**
2017  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2018  * @data: Pointer to IPV6 TCP packet data buffer
2019  *
2020  * This function checks whether it is an IPV6 TCP packet or not.
2021  *
2022  * Return: TRUE if it is an IPV6 TCP packet
2023  *         FALSE if not
2024  */
2025 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2026 {
2027 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2028 		uint8_t pkt_type;
2029 
2030 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2031 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2032 
2033 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2034 			return true;
2035 		else
2036 			return false;
2037 	} else
2038 		return false;
2039 }
2040 
2041 /**
2042  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2043  * @nbuf: sk buff
2044  *
2045  * Return: true if packet is broadcast
2046  *	   false otherwise
2047  */
2048 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2049 {
2050 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2051 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2052 }
2053 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
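
/*
 * Usage sketch (illustrative only, not part of the driver): classifying a
 * received frame with the data-inspection helpers above. The helpers expect
 * data to point at the start of the ethernet header; the caller below and
 * its log strings are hypothetical.
 */
#if 0	/* example only, compiled out */
static void example_classify_frame(qdf_nbuf_t nbuf)
{
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (__qdf_nbuf_is_bcast_pkt(nbuf))
		qdf_info("broadcast frame");
	else if (__qdf_nbuf_data_is_ipv4_udp_pkt(data))
		qdf_info("IPv4/UDP frame");
	else if (__qdf_nbuf_data_is_ipv6_tcp_pkt(data))
		qdf_info("IPv6/TCP frame");
	else if (__qdf_nbuf_data_is_icmp_pkt(data))
		qdf_info("IPv4/ICMP frame");
}
#endif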
2054 
2055 #ifdef NBUF_MEMORY_DEBUG
2056 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2057 
2058 /**
2059  * struct qdf_nbuf_track_t - Network buffer track structure
2060  *
2061  * @p_next: Pointer to next
2062  * @net_buf: Pointer to network buffer
2063  * @func_name: Function name
2064  * @line_num: Line number
2065  * @size: Size
2066  */
2067 struct qdf_nbuf_track_t {
2068 	struct qdf_nbuf_track_t *p_next;
2069 	qdf_nbuf_t net_buf;
2070 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2071 	uint32_t line_num;
2072 	size_t size;
2073 };
2074 
2075 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2076 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2077 
2078 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2079 static struct kmem_cache *nbuf_tracking_cache;
2080 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2081 static spinlock_t qdf_net_buf_track_free_list_lock;
2082 static uint32_t qdf_net_buf_track_free_list_count;
2083 static uint32_t qdf_net_buf_track_used_list_count;
2084 static uint32_t qdf_net_buf_track_max_used;
2085 static uint32_t qdf_net_buf_track_max_free;
2086 static uint32_t qdf_net_buf_track_max_allocated;
2087 
2088 /**
2089  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2090  *
2091  * tracks the max number of network buffers that the wlan driver was tracking
2092  * at any one time, as well as the max total number of tracker nodes allocated.
2093  *
2094  * Return: none
2095  */
2096 static inline void update_max_used(void)
2097 {
2098 	int sum;
2099 
2100 	if (qdf_net_buf_track_max_used <
2101 	    qdf_net_buf_track_used_list_count)
2102 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2103 	sum = qdf_net_buf_track_free_list_count +
2104 		qdf_net_buf_track_used_list_count;
2105 	if (qdf_net_buf_track_max_allocated < sum)
2106 		qdf_net_buf_track_max_allocated = sum;
2107 }
2108 
2109 /**
2110  * update_max_free() - update qdf_net_buf_track_max_free
2111  *
2112  * tracks the max number tracking buffers kept in the freelist.
2113  *
2114  * Return: none
2115  */
2116 static inline void update_max_free(void)
2117 {
2118 	if (qdf_net_buf_track_max_free <
2119 	    qdf_net_buf_track_free_list_count)
2120 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2121 }
2122 
2123 /**
2124  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2125  *
2126  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2127  * This function also adds flexibility to adjust the allocation and freelist
2128  * schemes.
2129  *
2130  * Return: a pointer to an unused QDF_NBUF_TRACK structure; may not be zeroed.
2131  */
2132 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2133 {
2134 	int flags = GFP_KERNEL;
2135 	unsigned long irq_flag;
2136 	QDF_NBUF_TRACK *new_node = NULL;
2137 
2138 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2139 	qdf_net_buf_track_used_list_count++;
2140 	if (qdf_net_buf_track_free_list) {
2141 		new_node = qdf_net_buf_track_free_list;
2142 		qdf_net_buf_track_free_list =
2143 			qdf_net_buf_track_free_list->p_next;
2144 		qdf_net_buf_track_free_list_count--;
2145 	}
2146 	update_max_used();
2147 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2148 
2149 	if (new_node)
2150 		return new_node;
2151 
2152 	if (in_interrupt() || irqs_disabled() || in_atomic())
2153 		flags = GFP_ATOMIC;
2154 
2155 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2156 }
2157 
2158 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2159 #define FREEQ_POOLSIZE 2048
2160 
2161 /**
2162  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2163  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2164  * @node: nbuf tracking node to be freed
2165  *
2166  * Matches calls to qdf_nbuf_track_alloc. Either frees the tracking cookie
2167  * back to the kernel or to an internal freelist, based on the freelist size.
2168  * Return: none
2169  */
2170 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2171 {
2172 	unsigned long irq_flag;
2173 
2174 	if (!node)
2175 		return;
2176 
2177 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE, and
2178 	 * only shrink the freelist if it is bigger than twice the number of
2179 	 * nbufs in use. If the driver is stalling in a consistent bursty
2180 	 * fashion, this will keep 3/4 of the allocations on the free list
2181 	 * while also allowing the system to recover memory as less frantic
2182 	 * traffic occurs.
2183 	 */
2184 
2185 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2186 
2187 	qdf_net_buf_track_used_list_count--;
2188 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2189 	   (qdf_net_buf_track_free_list_count >
2190 	    qdf_net_buf_track_used_list_count << 1)) {
2191 		kmem_cache_free(nbuf_tracking_cache, node);
2192 	} else {
2193 		node->p_next = qdf_net_buf_track_free_list;
2194 		qdf_net_buf_track_free_list = node;
2195 		qdf_net_buf_track_free_list_count++;
2196 	}
2197 	update_max_free();
2198 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2199 }
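
/*
 * Worked example (illustrative): with FREEQ_POOLSIZE of 2048, a node is
 * returned to the kernel only when, say, free_list_count == 2500 while
 * used_list_count == 1000, since 2500 > 2048 and 2500 > 2 * 1000; in any
 * other case the node is pushed back onto the freelist.
 */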
2200 
2201 /**
2202  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2203  *
2204  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2205  * the freelist first makes it performant for the first iperf udp burst
2206  * as well as steady state.
2207  *
2208  * Return: None
2209  */
2210 static void qdf_nbuf_track_prefill(void)
2211 {
2212 	int i;
2213 	QDF_NBUF_TRACK *node, *head;
2214 
2215 	/* prepopulate the freelist */
2216 	head = NULL;
2217 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2218 		node = qdf_nbuf_track_alloc();
2219 		if (!node)
2220 			continue;
2221 		node->p_next = head;
2222 		head = node;
2223 	}
2224 	while (head) {
2225 		node = head->p_next;
2226 		qdf_nbuf_track_free(head);
2227 		head = node;
2228 	}
2229 
2230 	/* prefilled buffers should not count as used */
2231 	qdf_net_buf_track_max_used = 0;
2232 }
2233 
2234 /**
2235  * qdf_nbuf_track_memory_manager_create() - create the nbuf tracking cookie manager
2236  *
2237  * This initializes the memory manager for the nbuf tracking cookies.  Because
2238  * these cookies are all the same size and only used in this feature, we can
2239  * use a kmem_cache to provide tracking as well as to speed up allocations.
2240  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2241  * features) a freelist is prepopulated here.
2242  *
2243  * Return: None
2244  */
2245 static void qdf_nbuf_track_memory_manager_create(void)
2246 {
2247 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2248 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2249 						sizeof(QDF_NBUF_TRACK),
2250 						0, 0, NULL);
2251 
2252 	qdf_nbuf_track_prefill();
2253 }
2254 
2255 /**
2256  * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking cookie manager
2257  *
2258  * Empty the freelist and print out usage statistics when it is no longer
2259  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2260  * any nbuf tracking cookies were leaked.
2261  *
2262  * Return: None
2263  */
2264 static void qdf_nbuf_track_memory_manager_destroy(void)
2265 {
2266 	QDF_NBUF_TRACK *node, *tmp;
2267 	unsigned long irq_flag;
2268 
2269 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2270 	node = qdf_net_buf_track_free_list;
2271 
2272 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2273 		qdf_print("%s: unexpectedly large max_used count %d",
2274 			  __func__, qdf_net_buf_track_max_used);
2275 
2276 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2277 		qdf_print("%s: %d unused trackers were allocated",
2278 			  __func__,
2279 			  qdf_net_buf_track_max_allocated -
2280 			  qdf_net_buf_track_max_used);
2281 
2282 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2283 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2284 		qdf_print("%s: check freelist shrinking functionality",
2285 			  __func__);
2286 
2287 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2288 		  "%s: %d residual freelist size",
2289 		  __func__, qdf_net_buf_track_free_list_count);
2290 
2291 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2292 		  "%s: %d max freelist size observed",
2293 		  __func__, qdf_net_buf_track_max_free);
2294 
2295 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2296 		  "%s: %d max buffers used observed",
2297 		  __func__, qdf_net_buf_track_max_used);
2298 
2299 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2300 		  "%s: %d max buffers allocated observed",
2301 		  __func__, qdf_net_buf_track_max_allocated);
2302 
2303 	while (node) {
2304 		tmp = node;
2305 		node = node->p_next;
2306 		kmem_cache_free(nbuf_tracking_cache, tmp);
2307 		qdf_net_buf_track_free_list_count--;
2308 	}
2309 
2310 	if (qdf_net_buf_track_free_list_count != 0)
2311 		qdf_info("%d unfreed tracking memory lost in freelist",
2312 			 qdf_net_buf_track_free_list_count);
2313 
2314 	if (qdf_net_buf_track_used_list_count != 0)
2315 		qdf_info("%d unfreed tracking memory still in use",
2316 			 qdf_net_buf_track_used_list_count);
2317 
2318 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2319 	kmem_cache_destroy(nbuf_tracking_cache);
2320 	qdf_net_buf_track_free_list = NULL;
2321 }
2322 
2323 /**
2324  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2325  *
2326  * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
2327  * driver in a hash table; when the driver is unloaded it reports leaked SKBs.
2328  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2329  * supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
2330  * reported as a memory leak.
2331  *
2332  * Return: none
2333  */
2334 void qdf_net_buf_debug_init(void)
2335 {
2336 	uint32_t i;
2337 
2338 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2339 
2340 	if (is_initial_mem_debug_disabled)
2341 		return;
2342 
2343 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2344 
2345 	qdf_nbuf_map_tracking_init();
2346 	qdf_nbuf_track_memory_manager_create();
2347 
2348 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2349 		gp_qdf_net_buf_track_tbl[i] = NULL;
2350 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2351 	}
2352 }
2353 qdf_export_symbol(qdf_net_buf_debug_init);
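
/*
 * Usage sketch (illustrative only): the expected lifetime of the nbuf debug
 * facility. The hypothetical init/deinit hook below is an assumption; when
 * NBUF_MEMORY_DEBUG is enabled the qdf_nbuf_alloc()/qdf_nbuf_free() wrappers
 * route through the *_debug variants defined below, so every alloc/free pair
 * is tracked between init and exit.
 */
#if 0	/* example only, compiled out */
static void example_debug_lifetime(qdf_device_t osdev)
{
	qdf_nbuf_t nbuf;

	qdf_net_buf_debug_init();		/* once, at driver load */

	nbuf = qdf_nbuf_alloc(osdev, 2048, 0, 4, 0);
	if (nbuf)
		qdf_nbuf_free(nbuf);		/* tracked alloc/free pair */

	qdf_net_buf_debug_exit();		/* once, at unload; logs leaks */
}
#endif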
2354 
2355 /**
2356  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2357  *
2358  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2359  * As part of exiting the functionality, free the leaked memory and
2360  * clean up the tracking buffers.
2361  *
2362  * Return: none
2363  */
2364 void qdf_net_buf_debug_exit(void)
2365 {
2366 	uint32_t i;
2367 	uint32_t count = 0;
2368 	unsigned long irq_flag;
2369 	QDF_NBUF_TRACK *p_node;
2370 	QDF_NBUF_TRACK *p_prev;
2371 
2372 	if (is_initial_mem_debug_disabled)
2373 		return;
2374 
2375 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2376 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2377 		p_node = gp_qdf_net_buf_track_tbl[i];
2378 		while (p_node) {
2379 			p_prev = p_node;
2380 			p_node = p_node->p_next;
2381 			count++;
2382 			qdf_info("SKB buf memory leak @ func %s, line %d, size %zu, nbuf %pK",
2383 				 p_prev->func_name, p_prev->line_num,
2384 				 p_prev->size, p_prev->net_buf);
2385 			qdf_nbuf_track_free(p_prev);
2386 		}
2387 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2388 	}
2389 
2390 	qdf_nbuf_track_memory_manager_destroy();
2391 	qdf_nbuf_map_tracking_deinit();
2392 
2393 #ifdef CONFIG_HALT_KMEMLEAK
2394 	if (count) {
2395 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2396 		QDF_BUG(0);
2397 	}
2398 #endif
2399 }
2400 qdf_export_symbol(qdf_net_buf_debug_exit);
2401 
2402 /**
2403  * qdf_net_buf_debug_hash() - hash network buffer pointer
2404  * @net_buf: network buffer to be hashed
2405  * Return: hash value
2406  */
2407 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2408 {
2409 	uint32_t i;
2410 
2411 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2412 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2413 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2414 
2415 	return i;
2416 }
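
/*
 * Worked example (illustrative): a buffer at 0x1000 hashes to bucket
 * (0x1000 >> 4) + (0x1000 >> 14) = 0x100 + 0x0 = 256, while one at 0x1800
 * lands in bucket 384; folding in the bits shifted by 14 additionally
 * spreads allocations that differ only in their higher-order bits.
 */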
2417 
2418 /**
2419  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2420  * @net_buf: network buffer to look up
2421  *
2422  * Return: pointer to the tracking node if the skb is found, else %NULL
2423  */
2424 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2425 {
2426 	uint32_t i;
2427 	QDF_NBUF_TRACK *p_node;
2428 
2429 	i = qdf_net_buf_debug_hash(net_buf);
2430 	p_node = gp_qdf_net_buf_track_tbl[i];
2431 
2432 	while (p_node) {
2433 		if (p_node->net_buf == net_buf)
2434 			return p_node;
2435 		p_node = p_node->p_next;
2436 	}
2437 
2438 	return NULL;
2439 }
2440 
2441 /**
2442  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2443  *
2444  * Return: none
2445  */
2446 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2447 				const char *func_name, uint32_t line_num)
2448 {
2449 	uint32_t i;
2450 	unsigned long irq_flag;
2451 	QDF_NBUF_TRACK *p_node;
2452 	QDF_NBUF_TRACK *new_node;
2453 
2454 	if (is_initial_mem_debug_disabled)
2455 		return;
2456 
2457 	new_node = qdf_nbuf_track_alloc();
2458 
2459 	i = qdf_net_buf_debug_hash(net_buf);
2460 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2461 
2462 	p_node = qdf_net_buf_debug_look_up(net_buf);
2463 
2464 	if (p_node) {
2465 		qdf_print("Double allocation of skb! Already allocated from %pK %s %d, current alloc from %pK %s %d",
2466 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2467 			  net_buf, func_name, line_num);
2468 		qdf_nbuf_track_free(new_node);
2469 	} else {
2470 		p_node = new_node;
2471 		if (p_node) {
2472 			p_node->net_buf = net_buf;
2473 			qdf_str_lcopy(p_node->func_name, func_name,
2474 				      QDF_MEM_FUNC_NAME_SIZE);
2475 			p_node->line_num = line_num;
2476 			p_node->size = size;
2477 			qdf_mem_skb_inc(size);
2478 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2479 			gp_qdf_net_buf_track_tbl[i] = p_node;
2480 		} else
2481 			qdf_print(
2482 				  "Mem alloc failed! Could not track skb from %s %d of size %zu",
2483 				  func_name, line_num, size);
2484 	}
2485 
2486 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2487 }
2488 qdf_export_symbol(qdf_net_buf_debug_add_node);
2489 
2490 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2491 				   uint32_t line_num)
2492 {
2493 	uint32_t i;
2494 	unsigned long irq_flag;
2495 	QDF_NBUF_TRACK *p_node;
2496 
2497 	if (is_initial_mem_debug_disabled)
2498 		return;
2499 
2500 	i = qdf_net_buf_debug_hash(net_buf);
2501 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2502 
2503 	p_node = qdf_net_buf_debug_look_up(net_buf);
2504 
2505 	if (p_node) {
2506 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2507 			      QDF_MEM_FUNC_NAME_SIZE);
2508 		p_node->line_num = line_num;
2509 	}
2510 
2511 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2512 }
2513 
2514 qdf_export_symbol(qdf_net_buf_debug_update_node);
2515 
2516 /**
2517  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2518  * @net_buf: network buffer whose tracking node is to be removed
2519  * Return: none
2520  */
2521 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2522 {
2523 	uint32_t i;
2524 	QDF_NBUF_TRACK *p_head;
2525 	QDF_NBUF_TRACK *p_node = NULL;
2526 	unsigned long irq_flag;
2527 	QDF_NBUF_TRACK *p_prev;
2528 
2529 	if (is_initial_mem_debug_disabled)
2530 		return;
2531 
2532 	i = qdf_net_buf_debug_hash(net_buf);
2533 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2534 
2535 	p_head = gp_qdf_net_buf_track_tbl[i];
2536 
2537 	/* Unallocated SKB */
2538 	if (!p_head)
2539 		goto done;
2540 
2541 	p_node = p_head;
2542 	/* Found at head of the table */
2543 	if (p_head->net_buf == net_buf) {
2544 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2545 		goto done;
2546 	}
2547 
2548 	/* Search in collision list */
2549 	while (p_node) {
2550 		p_prev = p_node;
2551 		p_node = p_node->p_next;
2552 		if ((p_node) && (p_node->net_buf == net_buf)) {
2553 			p_prev->p_next = p_node->p_next;
2554 			break;
2555 		}
2556 	}
2557 
2558 done:
2559 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2560 
2561 	if (p_node) {
2562 		qdf_mem_skb_dec(p_node->size);
2563 		qdf_nbuf_track_free(p_node);
2564 	} else {
2565 		qdf_print("Unallocated buffer! Double free of net_buf %pK?",
2566 			  net_buf);
2567 		QDF_BUG(0);
2568 	}
2569 }
2570 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2571 
2572 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2573 				   const char *func_name, uint32_t line_num)
2574 {
2575 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2576 
2577 	if (is_initial_mem_debug_disabled)
2578 		return;
2579 
2580 	while (ext_list) {
2581 		/*
2582 		 * Take care to add each segment of a jumbo packet
2583 		 * chained via frag_list
2584 		 */
2585 		qdf_nbuf_t next;
2586 
2587 		next = qdf_nbuf_queue_next(ext_list);
2588 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2589 		ext_list = next;
2590 	}
2591 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2592 }
2593 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2594 
2595 /**
2596  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2597  * @net_buf: Network buf holding head segment (single)
2598  *
2599  * WLAN driver modules whose allocated SKBs are freed by the network stack
2600  * are supposed to call this API before returning the SKB to the network
2601  * stack, so that the SKB is not reported as a memory leak.
2602  *
2603  * Return: none
2604  */
2605 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2606 {
2607 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2608 
2609 	if (is_initial_mem_debug_disabled)
2610 		return;
2611 
2612 	while (ext_list) {
2613 		/*
2614 		 * Take care to free each segment of a jumbo packet
2615 		 * chained via frag_list
2616 		 */
2617 		qdf_nbuf_t next;
2618 
2619 		next = qdf_nbuf_queue_next(ext_list);
2620 
2621 		if (qdf_nbuf_get_users(ext_list) > 1) {
2622 			ext_list = next;
2623 			continue;
2624 		}
2625 
2626 		qdf_net_buf_debug_delete_node(ext_list);
2627 		ext_list = next;
2628 	}
2629 
2630 	if (qdf_nbuf_get_users(net_buf) > 1)
2631 		return;
2632 
2633 	qdf_net_buf_debug_delete_node(net_buf);
2634 }
2635 qdf_export_symbol(qdf_net_buf_debug_release_skb);
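
/*
 * Usage sketch (illustrative only): a hypothetical rx-indication helper that
 * hands a driver-allocated skb up to the network stack. The stack frees the
 * skb, so the tracking node must be released first or the buffer would be
 * reported as leaked at unload time.
 */
#if 0	/* example only, compiled out */
static void example_indicate_rx(qdf_nbuf_t nbuf)
{
	qdf_net_buf_debug_release_skb(nbuf);
	netif_rx(nbuf);		/* ownership passes to the kernel */
}
#endif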
2636 
2637 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2638 				int reserve, int align, int prio,
2639 				const char *func, uint32_t line)
2640 {
2641 	qdf_nbuf_t nbuf;
2642 
2643 	if (is_initial_mem_debug_disabled)
2644 		return __qdf_nbuf_alloc(osdev, size,
2645 					reserve, align,
2646 					prio, func, line);
2647 
2648 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2649 
2650 	/* Store SKB in internal QDF tracking table */
2651 	if (qdf_likely(nbuf)) {
2652 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2653 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2654 	} else {
2655 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2656 	}
2657 
2658 	return nbuf;
2659 }
2660 qdf_export_symbol(qdf_nbuf_alloc_debug);
2661 
2662 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2663 {
2664 	qdf_nbuf_t ext_list;
2665 
2666 	if (qdf_unlikely(!nbuf))
2667 		return;
2668 
2669 	if (is_initial_mem_debug_disabled)
2670 		goto free_buf;
2671 
2672 	if (qdf_nbuf_get_users(nbuf) > 1)
2673 		goto free_buf;
2674 
2675 	/* Remove SKB from internal QDF tracking table */
2676 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2677 	qdf_net_buf_debug_delete_node(nbuf);
2678 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2679 
2680 	/* Take care to delete the debug entries for frag_list */
2681 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2682 	while (ext_list) {
2683 		if (qdf_nbuf_get_users(ext_list) == 1) {
2684 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2685 			qdf_net_buf_debug_delete_node(ext_list);
2686 		}
2687 
2688 		ext_list = qdf_nbuf_queue_next(ext_list);
2689 	}
2690 
2691 free_buf:
2692 	__qdf_nbuf_free(nbuf);
2693 }
2694 qdf_export_symbol(qdf_nbuf_free_debug);
2695 
2696 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2697 {
2698 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2699 
2700 	if (is_initial_mem_debug_disabled)
2701 		return cloned_buf;
2702 
2703 	if (qdf_unlikely(!cloned_buf))
2704 		return NULL;
2705 
2706 	/* Store SKB in internal QDF tracking table */
2707 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2708 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2709 
2710 	return cloned_buf;
2711 }
2712 qdf_export_symbol(qdf_nbuf_clone_debug);
2713 
2714 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2715 {
2716 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2717 
2718 	if (is_initial_mem_debug_disabled)
2719 		return copied_buf;
2720 
2721 	if (qdf_unlikely(!copied_buf))
2722 		return NULL;
2723 
2724 	/* Store SKB in internal QDF tracking table */
2725 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2726 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2727 
2728 	return copied_buf;
2729 }
2730 qdf_export_symbol(qdf_nbuf_copy_debug);
2731 
2732 qdf_nbuf_t
2733 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
2734 			   const char *func, uint32_t line)
2735 {
2736 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
2737 
2738 	if (qdf_unlikely(!copied_buf))
2739 		return NULL;
2740 
2741 	if (is_initial_mem_debug_disabled)
2742 		return copied_buf;
2743 
2744 	/* Store SKB in internal QDF tracking table */
2745 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2746 	qdf_nbuf_history_add(copied_buf, func, line,
2747 			     QDF_NBUF_ALLOC_COPY_EXPAND);
2748 
2749 	return copied_buf;
2750 }
2751 
2752 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
2753 
2754 #endif /* NBUF_MEMORY_DEBUG */
2755 
2756 #if defined(FEATURE_TSO)
2757 
2758 /**
2759  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2760  *
2761  * @ethproto: ethernet type of the msdu
2762  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2763  * @l2_len: L2 length for the msdu
2764  * @eit_hdr: pointer to EIT header
2765  * @eit_hdr_len: EIT header length for the msdu
2766  * @eit_hdr_dma_map_addr: dma addr for EIT header
2767  * @tcphdr: pointer to tcp header
2768  * @ipv4_csum_en: ipv4 checksum enable
2769  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2770  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2771  * @ip_id: IP id
2772  * @tcp_seq_num: TCP sequence number
2773  *
2774  * This structure holds the TSO common info that is common
2775  * across all the TCP segments of the jumbo packet.
2776  */
2777 struct qdf_tso_cmn_seg_info_t {
2778 	uint16_t ethproto;
2779 	uint16_t ip_tcp_hdr_len;
2780 	uint16_t l2_len;
2781 	uint8_t *eit_hdr;
2782 	uint32_t eit_hdr_len;
2783 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2784 	struct tcphdr *tcphdr;
2785 	uint16_t ipv4_csum_en;
2786 	uint16_t tcp_ipv4_csum_en;
2787 	uint16_t tcp_ipv6_csum_en;
2788 	uint16_t ip_id;
2789 	uint32_t tcp_seq_num;
2790 };
2791 
2792 /**
2793  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2794  * information
2795  * @osdev: qdf device handle
2796  * @skb: skb buffer
2797  * @tso_info: Parameters common to all segments
2798  *
2799  * Get the TSO information that is common across all the TCP
2800  * segments of the jumbo packet
2801  *
2802  * Return: 0 - success 1 - failure
2803  */
2804 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2805 			struct sk_buff *skb,
2806 			struct qdf_tso_cmn_seg_info_t *tso_info)
2807 {
2808 	/* Get ethernet type and ethernet header length */
2809 	tso_info->ethproto = vlan_get_protocol(skb);
2810 
2811 	/* Determine whether this is an IPv4 or IPv6 packet */
2812 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2813 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2814 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2815 
2816 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2817 		tso_info->ipv4_csum_en = 1;
2818 		tso_info->tcp_ipv4_csum_en = 1;
2819 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2820 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2821 				ipv4_hdr->protocol);
2822 			return 1;
2823 		}
2824 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2825 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2826 		tso_info->tcp_ipv6_csum_en = 1;
2827 	} else {
2828 		qdf_err("TSO: ethertype 0x%x is not supported!",
2829 			tso_info->ethproto);
2830 		return 1;
2831 	}
2832 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2833 	tso_info->tcphdr = tcp_hdr(skb);
2834 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2835 	/* get pointer to the ethernet + IP + TCP header and their length */
2836 	tso_info->eit_hdr = skb->data;
2837 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2838 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2839 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2840 							tso_info->eit_hdr,
2841 							tso_info->eit_hdr_len,
2842 							DMA_TO_DEVICE);
2843 	if (unlikely(dma_mapping_error(osdev->dev,
2844 				       tso_info->eit_hdr_dma_map_addr))) {
2845 		qdf_err("DMA mapping error!");
2846 		qdf_assert(0);
2847 		return 1;
2848 	}
2849 
2850 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2851 		/* include IPv4 header length for IPv4 (total length) */
2852 		tso_info->ip_tcp_hdr_len =
2853 			tso_info->eit_hdr_len - tso_info->l2_len;
2854 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2855 		/* exclude IPv6 header length for IPv6 (payload length) */
2856 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2857 	}
2858 	/*
2859 	 * The length of the payload (application layer data) is added to
2860 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2861 	 * descriptor.
2862 	 */
2863 
2864 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2865 		tso_info->tcp_seq_num,
2866 		tso_info->eit_hdr_len,
2867 		tso_info->l2_len,
2868 		skb->len);
2869 	return 0;
2870 }
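
/*
 * Worked example (illustrative): for an untagged IPv4 TCP packet with no IP
 * or TCP options, eit_hdr_len = 14 (ethernet) + 20 (IP) + 20 (TCP) = 54 and
 * l2_len = 14, so the IPv4 branch above yields ip_tcp_hdr_len = 54 - 14 = 40.
 */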
2871 
2872 
2873 /**
2874  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2875  *
2876  * @curr_seg: Segment whose contents are initialized
2877  * @tso_cmn_info: Parameters common to all segments
2878  *
2879  * Return: None
2880  */
2881 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2882 				struct qdf_tso_seg_elem_t *curr_seg,
2883 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2884 {
2885 	/* Initialize the flags to 0 */
2886 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2887 
2888 	/*
2889 	 * The following fields remain the same across all segments of
2890 	 * a jumbo packet
2891 	 */
2892 	curr_seg->seg.tso_flags.tso_enable = 1;
2893 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2894 		tso_cmn_info->ipv4_csum_en;
2895 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2896 		tso_cmn_info->tcp_ipv6_csum_en;
2897 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2898 		tso_cmn_info->tcp_ipv4_csum_en;
2899 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2900 
2901 	/* The following fields change for the segments */
2902 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2903 	tso_cmn_info->ip_id++;
2904 
2905 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2906 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2907 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2908 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2909 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2910 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2911 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2912 
2913 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2914 
2915 	/*
2916 	 * First fragment for each segment always contains the ethernet,
2917 	 * IP and TCP header
2918 	 */
2919 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2920 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2921 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2922 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2923 
2924 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2925 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2926 		   tso_cmn_info->eit_hdr_len,
2927 		   curr_seg->seg.tso_flags.tcp_seq_num,
2928 		   curr_seg->seg.total_len);
2929 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2930 }
2931 
2932 /**
2933  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2934  * into segments
2935  * @osdev: qdf device handle
2936  * @skb: network buffer to be segmented
2937  * @tso_info: output; information about the TSO segments is populated here
2938  *
2939  * This function fragments a TCP jumbo packet into smaller
2940  * segments to be transmitted by the driver. It chains the TSO
2941  * segments created into a list.
2942  *
2943  * Return: number of TSO segments
2944  */
2945 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2946 		struct qdf_tso_info_t *tso_info)
2947 {
2948 	/* common across all segments */
2949 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2950 	/* segment specific */
2951 	void *tso_frag_vaddr;
2952 	qdf_dma_addr_t tso_frag_paddr = 0;
2953 	uint32_t num_seg = 0;
2954 	struct qdf_tso_seg_elem_t *curr_seg;
2955 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2956 	skb_frag_t *frag = NULL;
2957 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2958 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
2959 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2960 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2961 	int j = 0; /* skb fragment index */
2962 
2963 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2964 	total_num_seg = tso_info->tso_num_seg_list;
2965 	curr_seg = tso_info->tso_seg_list;
2966 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2967 
2968 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2969 						skb, &tso_cmn_info))) {
2970 		qdf_warn("TSO: error getting common segment info");
2971 		return 0;
2972 	}
2973 
2974 	/* length of the first chunk of data in the skb */
2975 	skb_frag_len = skb_headlen(skb);
2976 
2977 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2978 	/* update the remaining skb fragment length and TSO segment length */
2979 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2980 	skb_proc -= tso_cmn_info.eit_hdr_len;
2981 
2982 	/* get the address to the next tso fragment */
2983 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2984 	/* get the length of the next tso fragment */
2985 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2986 
2987 	if (tso_frag_len != 0) {
2988 		tso_frag_paddr = dma_map_single(osdev->dev,
2989 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
2990 	}
2991 
2992 	if (unlikely(dma_mapping_error(osdev->dev,
2993 					tso_frag_paddr))) {
2994 		qdf_err("DMA mapping error!");
2995 		qdf_assert(0);
2996 		return 0;
2997 	}
2998 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
2999 		__LINE__, skb_frag_len, tso_frag_len);
3000 	num_seg = tso_info->num_segs;
3001 	tso_info->num_segs = 0;
3002 	tso_info->is_tso = 1;
3003 
3004 	while (num_seg && curr_seg) {
3005 		int i = 1; /* tso fragment index */
3006 		uint8_t more_tso_frags = 1;
3007 
3008 		curr_seg->seg.num_frags = 0;
3009 		tso_info->num_segs++;
3010 		total_num_seg->num_seg.tso_cmn_num_seg++;
3011 
3012 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3013 						 &tso_cmn_info);
3014 
3015 		if (unlikely(skb_proc == 0))
3016 			return tso_info->num_segs;
3017 
3018 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3019 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3020 		/* frag len is added to ip_len in the while loop below */
3021 
3022 		curr_seg->seg.num_frags++;
3023 
3024 		while (more_tso_frags) {
3025 			if (tso_frag_len != 0) {
3026 				curr_seg->seg.tso_frags[i].vaddr =
3027 					tso_frag_vaddr;
3028 				curr_seg->seg.tso_frags[i].length =
3029 					tso_frag_len;
3030 				curr_seg->seg.total_len += tso_frag_len;
3031 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3032 				curr_seg->seg.num_frags++;
3033 				skb_proc = skb_proc - tso_frag_len;
3034 
3035 				/* increment the TCP sequence number */
3036 
3037 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3038 				curr_seg->seg.tso_frags[i].paddr =
3039 					tso_frag_paddr;
3040 			}
3041 
3042 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3043 					__func__, __LINE__,
3044 					i,
3045 					tso_frag_len,
3046 					curr_seg->seg.total_len,
3047 					curr_seg->seg.tso_frags[i].vaddr);
3048 
3049 			/* if there is no more data left in the skb */
3050 			if (!skb_proc)
3051 				return tso_info->num_segs;
3052 
3053 			/* get the next payload fragment information */
3054 			/* check if there are more fragments in this segment */
3055 			if (tso_frag_len < tso_seg_size) {
3056 				more_tso_frags = 1;
3057 				if (tso_frag_len != 0) {
3058 					tso_seg_size = tso_seg_size -
3059 						tso_frag_len;
3060 					i++;
3061 					if (curr_seg->seg.num_frags ==
3062 								FRAG_NUM_MAX) {
3063 						more_tso_frags = 0;
3064 						/*
3065 						 * reset i and the tso
3066 						 * payload size
3067 						 */
3068 						i = 1;
3069 						tso_seg_size =
3070 							skb_shinfo(skb)->
3071 								gso_size;
3072 					}
3073 				}
3074 			} else {
3075 				more_tso_frags = 0;
3076 				/* reset i and the tso payload size */
3077 				i = 1;
3078 				tso_seg_size = skb_shinfo(skb)->gso_size;
3079 			}
3080 
3081 			/* if the next fragment is contiguous */
3082 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3083 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3084 				skb_frag_len = skb_frag_len - tso_frag_len;
3085 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3086 
3087 			} else { /* the next fragment is not contiguous */
3088 				if (skb_shinfo(skb)->nr_frags == 0) {
3089 					qdf_info("TSO: nr_frags == 0!");
3090 					qdf_assert(0);
3091 					return 0;
3092 				}
3093 				if (j >= skb_shinfo(skb)->nr_frags) {
3094 					qdf_info("TSO: nr_frags %d j %d",
3095 						 skb_shinfo(skb)->nr_frags, j);
3096 					qdf_assert(0);
3097 					return 0;
3098 				}
3099 				frag = &skb_shinfo(skb)->frags[j];
3100 				skb_frag_len = skb_frag_size(frag);
3101 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3102 				tso_frag_vaddr = skb_frag_address_safe(frag);
3103 				j++;
3104 			}
3105 
3106 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3107 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3108 				tso_seg_size);
3109 
3110 			if (!(tso_frag_vaddr)) {
3111 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3112 						__func__);
3113 				return 0;
3114 			}
3115 
3116 			tso_frag_paddr =
3117 					 dma_map_single(osdev->dev,
3118 						 tso_frag_vaddr,
3119 						 tso_frag_len,
3120 						 DMA_TO_DEVICE);
3121 			if (unlikely(dma_mapping_error(osdev->dev,
3122 							tso_frag_paddr))) {
3123 				qdf_err("DMA mapping error!");
3124 				qdf_assert(0);
3125 				return 0;
3126 			}
3127 		}
3128 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3129 				curr_seg->seg.tso_flags.tcp_seq_num);
3130 		num_seg--;
3131 		/* if TCP FIN flag was set, set it in the last segment */
3132 		if (!num_seg)
3133 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3134 
3135 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3136 		curr_seg = curr_seg->next;
3137 	}
3138 	return tso_info->num_segs;
3139 }
3140 qdf_export_symbol(__qdf_nbuf_get_tso_info);
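
/*
 * Usage sketch (illustrative only, error handling elided): a hypothetical TX
 * path segmenting a TSO skb. It assumes the caller has already linked
 * tso_info->tso_seg_list and tso_info->tso_num_seg_list with elements from
 * the driver's descriptor pools and has set tso_info->num_segs to the number
 * of elements provided, as the loop above expects.
 */
#if 0	/* example only, compiled out */
static void example_tx_tso(qdf_device_t osdev, struct sk_buff *skb,
			   struct qdf_tso_info_t *tso_info)
{
	struct qdf_tso_seg_elem_t *seg;

	if (!__qdf_nbuf_get_tso_info(osdev, skb, tso_info))
		return;		/* parse or DMA mapping failure */

	for (seg = tso_info->tso_seg_list; seg; seg = seg->next) {
		/* hand seg->seg.tso_frags[0..num_frags-1] to the target */
	}
}
#endif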
3141 
3142 /**
3143  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3144  *
3145  * @osdev: qdf device handle
3146  * @tso_seg: TSO segment element to be unmapped
3147  * @is_last_seg: whether this is last tso seg or not
3148  *
3149  * Return: none
3150  */
3151 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3152 			  struct qdf_tso_seg_elem_t *tso_seg,
3153 			  bool is_last_seg)
3154 {
3155 	uint32_t num_frags = 0;
3156 
3157 	if (tso_seg->seg.num_frags > 0)
3158 		num_frags = tso_seg->seg.num_frags - 1;
3159 
3160 	/* Num of frags in a tso seg cannot be less than 2 */
3161 	if (num_frags < 1) {
3162 		/*
3163 		 * A tso seg with only one frag but is_last_seg set can
3164 		 * happen when qdf_nbuf_get_tso_info failed; in that case,
3165 		 * do dma unmap for the 0th frag in this seg.
3166 		 */
3167 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3168 			goto last_seg_free_first_frag;
3169 
3170 		qdf_assert(0);
3171 		qdf_err("ERROR: num of frags in a tso segment is %d",
3172 			(num_frags + 1));
3173 		return;
3174 	}
3175 
3176 	while (num_frags) {
3177 		/* Do dma unmap of the tso seg except the 0th frag */
3178 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3179 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3180 				num_frags);
3181 			qdf_assert(0);
3182 			return;
3183 		}
3184 		dma_unmap_single(osdev->dev,
3185 				 tso_seg->seg.tso_frags[num_frags].paddr,
3186 				 tso_seg->seg.tso_frags[num_frags].length,
3187 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3188 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3189 		num_frags--;
3190 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3191 	}
3192 
3193 last_seg_free_first_frag:
3194 	if (is_last_seg) {
3195 		/* Do dma unmap for the tso seg 0th frag */
3196 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3197 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3198 			qdf_assert(0);
3199 			return;
3200 		}
3201 		dma_unmap_single(osdev->dev,
3202 				 tso_seg->seg.tso_frags[0].paddr,
3203 				 tso_seg->seg.tso_frags[0].length,
3204 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3205 		tso_seg->seg.tso_frags[0].paddr = 0;
3206 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3207 	}
3208 }
3209 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3210 
3211 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3212 {
3213 	size_t packet_len;
3214 
3215 	packet_len = skb->len -
3216 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3217 		 tcp_hdrlen(skb));
3218 
3219 	return packet_len;
3220 }
3221 
3222 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
3223 
3224 /**
3225  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
3226  * for a TSO nbuf
3227  * @skb: network buffer to be inspected
3228  *
3229  * This function calculates how many segments a TCP jumbo packet will
3230  * be divided into, without actually performing the segmentation.
3231  *
3232  * Return: number of TSO segments, or 0 on failure
3236  */
3237 #ifndef BUILD_X86
3238 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3239 {
3240 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3241 	uint32_t remainder, num_segs = 0;
3242 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3243 	uint8_t frags_per_tso = 0;
3244 	uint32_t skb_frag_len = 0;
3245 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3246 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3247 	skb_frag_t *frag = NULL;
3248 	int j = 0;
3249 	uint32_t temp_num_seg = 0;
3250 
3251 	/* length of the first chunk of data in the skb minus eit header*/
3252 	/* length of the first chunk of data in the skb minus the EIT header */
3253 
3254 	/* Calculate num of segs for skb's first chunk of data*/
3255 	remainder = skb_frag_len % tso_seg_size;
3256 	num_segs = skb_frag_len / tso_seg_size;
3257 	/*
3258 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3259 	 * In that case, one more tso seg is required to accommodate
3260 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3261 	 * then remaining data will be accommodated while doing the calculation
3262 	 * for nr_frags data. Hence, frags_per_tso++.
3263 	 */
3264 	if (remainder) {
3265 		if (!skb_nr_frags)
3266 			num_segs++;
3267 		else
3268 			frags_per_tso++;
3269 	}
3270 
3271 	while (skb_nr_frags) {
3272 		if (j >= skb_shinfo(skb)->nr_frags) {
3273 			qdf_info("TSO: nr_frags %d j %d",
3274 				 skb_shinfo(skb)->nr_frags, j);
3275 			qdf_assert(0);
3276 			return 0;
3277 		}
3278 		/*
3279 		 * Calculate the number of tso segs for the nr_frags data:
3280 		 * get the length of each frag in skb_frag_len and add it to
3281 		 * remainder. Get the number of segments by dividing by
3282 		 * tso_seg_size and calculate the new remainder.
3283 		 * Decrement the nr_frags value and keep
3284 		 * looping over all the skb fragments.
3285 		 */
3286 		frag = &skb_shinfo(skb)->frags[j];
3287 		skb_frag_len = skb_frag_size(frag);
3288 		temp_num_seg = num_segs;
3289 		remainder += skb_frag_len;
3290 		num_segs += remainder / tso_seg_size;
3291 		remainder = remainder % tso_seg_size;
3292 		skb_nr_frags--;
3293 		if (remainder) {
3294 			if (num_segs > temp_num_seg)
3295 				frags_per_tso = 0;
3296 			/*
3297 			 * Increment frags_per_tso whenever the remainder is
3298 			 * positive. If frags_per_tso reaches (max - 1)
3299 			 * [the first frag always carries the EIT header,
3300 			 * hence max - 1], increment num_segs as no more data
3301 			 * can be accommodated in the current tso seg. Reset
3302 			 * the remainder and frags_per_tso and keep looping.
3303 			 */
3304 			frags_per_tso++;
3305 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3306 				num_segs++;
3307 				frags_per_tso = 0;
3308 				remainder = 0;
3309 			}
3310 			/*
3311 			 * If this is the last skb frag and the remainder is
3312 			 * still non-zero (frags_per_tso has not reached
3313 			 * max - 1), increment num_segs to take care of the
3314 			 * remaining length.
3315 			 */
3316 			if (!skb_nr_frags && remainder) {
3317 				num_segs++;
3318 				frags_per_tso = 0;
3319 			}
3320 		} else {
3321 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3322 			frags_per_tso = 0;
3323 		}
3324 		j++;
3325 	}
3326 
3327 	return num_segs;
3328 }
3329 #elif !defined(QCA_WIFI_QCN9000)
3330 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3331 {
3332 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3333 	skb_frag_t *frag = NULL;
3334 
3335 	/*
3336 	 * Check if the head SKB or any of the frags are allocated below the
3337 	 * 0x50000000 region, which cannot be accessed by the Target
3338 	 */
3339 	if (virt_to_phys(skb->data) < 0x50000040) {
3340 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3341 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3342 				virt_to_phys(skb->data));
3343 		goto fail;
3344 
3345 	}
3346 
3347 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3348 		frag = &skb_shinfo(skb)->frags[i];
3349 
3350 		if (!frag)
3351 			goto fail;
3352 
3353 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3354 			goto fail;
3355 	}
3356 
3357 
3358 	gso_size = skb_shinfo(skb)->gso_size;
3359 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3360 			+ tcp_hdrlen(skb));
3361 	while (tmp_len) {
3362 		num_segs++;
3363 		if (tmp_len > gso_size)
3364 			tmp_len -= gso_size;
3365 		else
3366 			break;
3367 	}
3368 
3369 	return num_segs;
3370 
3371 	/*
3372 	 * Do not free this frame, just do socket level accounting
3373 	 * so that this is not reused.
3374 	 */
3375 fail:
3376 	if (skb->sk)
3377 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3378 
3379 	return 0;
3380 }
3381 #else
3382 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3383 {
3384 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3385 	skb_frag_t *frag = NULL;
3386 
3387 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3388 		frag = &skb_shinfo(skb)->frags[i];
3389 
3390 		if (!frag)
3391 			goto fail;
3392 	}
3393 
3394 	gso_size = skb_shinfo(skb)->gso_size;
3395 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3396 			+ tcp_hdrlen(skb));
3397 	while (tmp_len) {
3398 		num_segs++;
3399 		if (tmp_len > gso_size)
3400 			tmp_len -= gso_size;
3401 		else
3402 			break;
3403 	}
3404 
3405 	return num_segs;
3406 
3407 	/*
3408 	 * Do not free this frame, just do socket level accounting
3409 	 * so that this is not reused.
3410 	 */
3411 fail:
3412 	if (skb->sk)
3413 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3414 
3415 	return 0;
3416 }
3417 #endif
3418 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
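
/*
 * Worked example (illustrative): for a linear skb with gso_size 1400 and a
 * 3000-byte TCP payload, the payload splits as 1400 + 1400 + 200, so
 * __qdf_nbuf_get_tso_num_seg() reports 3 (3000 / 1400 = 2 full segments plus
 * one more for the 200-byte remainder).
 */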
3419 
3420 #endif /* FEATURE_TSO */
3421 
3422 /**
3423  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
3424  *
3425  * Returns the high and low 32 bits of the DMA addr in the provided pointers
3426  *
3427  * Return: N/A
3428  */
3429 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3430 			  uint32_t *lo, uint32_t *hi)
3431 {
3432 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3433 		*lo = lower_32_bits(dmaaddr);
3434 		*hi = upper_32_bits(dmaaddr);
3435 	} else {
3436 		*lo = dmaaddr;
3437 		*hi = 0;
3438 	}
3439 }
3440 
3441 qdf_export_symbol(__qdf_dmaaddr_to_32s);
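
/*
 * Worked example (illustrative): a 64-bit qdf_dma_addr_t of 0x0000000540001000
 * splits into lo = 0x40001000 and hi = 0x00000005; on a 32-bit DMA address the
 * hi word is simply 0.
 */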
3442 
3443 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3444 {
3445 	qdf_nbuf_users_inc(&skb->users);
3446 	return skb;
3447 }
3448 qdf_export_symbol(__qdf_nbuf_inc_users);
3449 
3450 int __qdf_nbuf_get_users(struct sk_buff *skb)
3451 {
3452 	return qdf_nbuf_users_read(&skb->users);
3453 }
3454 qdf_export_symbol(__qdf_nbuf_get_users);
3455 
3456 /**
3457  * __qdf_nbuf_ref() - Reference the nbuf so it is held until the last free.
3458  * @skb: sk_buff handle
3459  *
3460  * Return: none
3461  */
3462 
3463 void __qdf_nbuf_ref(struct sk_buff *skb)
3464 {
3465 	skb_get(skb);
3466 }
3467 qdf_export_symbol(__qdf_nbuf_ref);
3468 
3469 /**
3470  * __qdf_nbuf_shared() - Check whether the buffer is shared
3471  * @skb: sk_buff buffer
3472  *
3473  * Return: true if more than one person has a reference to this buffer.
3474  */
3475 int __qdf_nbuf_shared(struct sk_buff *skb)
3476 {
3477 	return skb_shared(skb);
3478 }
3479 qdf_export_symbol(__qdf_nbuf_shared);
3480 
3481 /**
3482  * __qdf_nbuf_dmamap_create() - create a DMA map.
3483  * @osdev: qdf device handle
3484  * @dmap: dma map handle
3485  *
3486  * This can later be used to map networking buffers. They:
3487  * - need space in adf_drv's software descriptor
3488  * - are typically created during adf_drv_create
3489  * - need to be created before any API (qdf_nbuf_map) that uses them
3490  *
3491  * Return: QDF STATUS
3492  */
3493 QDF_STATUS
3494 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3495 {
3496 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3497 	/*
3498 	 * The driver can tell us its SG capability; it must be handled.
3499 	 * Use bounce buffers if they are present.
3500 	 */
3501 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3502 	if (!(*dmap))
3503 		error = QDF_STATUS_E_NOMEM;
3504 
3505 	return error;
3506 }
3507 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3508 /**
3509  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3510  * @osdev: qdf device handle
3511  * @dmap: dma map handle
3512  *
3513  * Return: none
3514  */
3515 void
3516 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3517 {
3518 	kfree(dmap);
3519 }
3520 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3521 
3522 /**
3523  * __qdf_nbuf_map_nbytes_single() - map nbytes
3524  * @osdev: os device
3525  * @buf: buffer
3526  * @dir: direction
3527  * @nbytes: number of bytes
3528  *
3529  * Return: QDF_STATUS
3530  */
3531 #ifdef A_SIMOS_DEVHOST
3532 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3533 		qdf_device_t osdev, struct sk_buff *buf,
3534 		 qdf_dma_dir_t dir, int nbytes)
3535 {
3536 	qdf_dma_addr_t paddr;
3537 
3538 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3539 	return QDF_STATUS_SUCCESS;
3540 }
3541 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3542 #else
3543 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3544 		qdf_device_t osdev, struct sk_buff *buf,
3545 		 qdf_dma_dir_t dir, int nbytes)
3546 {
3547 	qdf_dma_addr_t paddr;
3548 
3549 	/* assume that the OS only provides a single fragment */
3550 	QDF_NBUF_CB_PADDR(buf) = paddr =
3551 		dma_map_single(osdev->dev, buf->data,
3552 			nbytes, __qdf_dma_dir_to_os(dir));
3553 	return dma_mapping_error(osdev->dev, paddr) ?
3554 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3555 }
3556 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3557 #endif
3558 /**
3559  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3560  * @osdev: os device
3561  * @buf: buffer
3562  * @dir: direction
3563  * @nbytes: number of bytes
3564  *
3565  * Return: none
3566  */
3567 #if defined(A_SIMOS_DEVHOST)
3568 void
3569 __qdf_nbuf_unmap_nbytes_single(
3570 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3571 {
3572 }
3573 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3574 
3575 #else
3576 void
3577 __qdf_nbuf_unmap_nbytes_single(
3578 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3579 {
3580 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3581 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3582 		return;
3583 	}
3584 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3585 			nbytes, __qdf_dma_dir_to_os(dir));
3586 }
3587 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3588 #endif
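
/*
 * Usage sketch (illustrative only): pairing the single-fragment map/unmap
 * helpers around a device transfer. The hypothetical caller assumes a linear,
 * single-fragment skb, as the helpers themselves do.
 */
#if 0	/* example only, compiled out */
static void example_dma_roundtrip(qdf_device_t osdev, struct sk_buff *skb)
{
	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
					 skb->len) != QDF_STATUS_SUCCESS)
		return;

	/* ... hand QDF_NBUF_CB_PADDR(skb) to the device, wait for completion */

	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
				       skb->len);
}
#endif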
3589 /**
3590  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3591  * @osdev: os device
3592  * @skb: skb handle
3593  * @dir: dma direction
3594  * @nbytes: number of bytes to be mapped
3595  *
3596  * Return: QDF_STATUS
3597  */
3598 #ifdef QDF_OS_DEBUG
3599 QDF_STATUS
3600 __qdf_nbuf_map_nbytes(
3601 	qdf_device_t osdev,
3602 	struct sk_buff *skb,
3603 	qdf_dma_dir_t dir,
3604 	int nbytes)
3605 {
3606 	struct skb_shared_info  *sh = skb_shinfo(skb);
3607 
3608 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3609 
3610 	/*
3611 	 * Assume there's only a single fragment.
3612 	 * To support multiple fragments, it would be necessary to change
3613 	 * qdf_nbuf_t to be a separate object that stores meta-info
3614 	 * (including the bus address for each fragment) and a pointer
3615 	 * to the underlying sk_buff.
3616 	 */
3617 	qdf_assert(sh->nr_frags == 0);
3618 
3619 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3620 }
3621 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3622 #else
3623 QDF_STATUS
3624 __qdf_nbuf_map_nbytes(
3625 	qdf_device_t osdev,
3626 	struct sk_buff *skb,
3627 	qdf_dma_dir_t dir,
3628 	int nbytes)
3629 {
3630 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3631 }
3632 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3633 #endif
3634 /**
3635  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3636  * @osdev: OS device
3637  * @skb: skb handle
3638  * @dir: direction
3639  * @nbytes: number of bytes
3640  *
3641  * Return: none
3642  */
3643 void
3644 __qdf_nbuf_unmap_nbytes(
3645 	qdf_device_t osdev,
3646 	struct sk_buff *skb,
3647 	qdf_dma_dir_t dir,
3648 	int nbytes)
3649 {
3650 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3651 
3652 	/*
3653 	 * Assume there's a single fragment.
3654 	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will catch it.
3655 	 */
3656 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3657 }
3658 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3659 
3660 /**
3661  * __qdf_nbuf_dma_map_info() - return the dma map info
3662  * @bmap: dma map
3663  * @sg: dma map info
3664  *
3665  * Return: none
3666  */
3667 void
3668 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3669 {
3670 	qdf_assert(bmap->mapped);
3671 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3672 
3673 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3674 			sizeof(struct __qdf_segment));
3675 	sg->nsegs = bmap->nsegs;
3676 }
3677 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3678 /**
3679  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3680  *			specified by the index
3681  * @skb: sk buff
3682  * @sg: scatter/gather list of all the frags
3683  *
3684  * Return: none
3685  */
3686 #if defined(__QDF_SUPPORT_FRAG_MEM)
3687 void
3688 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3689 {
3690 	struct skb_shared_info *sh = skb_shinfo(skb);
3691 	int i;
3692 
3693 	qdf_assert(skb);
3694 	sg->sg_segs[0].vaddr = skb->data;
3695 	sg->sg_segs[0].len   = skb->len;
3696 	sg->nsegs            = 1;
3697 
3698 	for (i = 1; i <= sh->nr_frags; i++) {
3699 		skb_frag_t *f = &sh->frags[i - 1];
3700 
3701 		qdf_assert(i < QDF_MAX_SGLIST);
3702 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
3703 						   f->page_offset);
3704 		sg->sg_segs[i].len   = f->size;
3705 	}
3706 	/* the head data above is segment 0; add one segment per frag */
3707 	sg->nsegs += sh->nr_frags;
3708 }
3707 qdf_export_symbol(__qdf_nbuf_frag_info);
3708 #else
3709 #ifdef QDF_OS_DEBUG
3710 void
3711 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3712 {
3713 
3714 	struct skb_shared_info  *sh = skb_shinfo(skb);
3715 
3716 	qdf_assert(skb);
3717 	sg->sg_segs[0].vaddr = skb->data;
3718 	sg->sg_segs[0].len   = skb->len;
3719 	sg->nsegs            = 1;
3720 
3721 	qdf_assert(sh->nr_frags == 0);
3722 }
3723 qdf_export_symbol(__qdf_nbuf_frag_info);
3724 #else
3725 void
3726 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3727 {
3728 	sg->sg_segs[0].vaddr = skb->data;
3729 	sg->sg_segs[0].len   = skb->len;
3730 	sg->nsegs            = 1;
3731 }
3732 qdf_export_symbol(__qdf_nbuf_frag_info);
3733 #endif
3734 #endif
3735 /**
3736  * __qdf_nbuf_get_frag_size() - get frag size
3737  * @nbuf: sk buffer
3738  * @cur_frag: current frag
3739  *
3740  * Return: frag size
3741  */
3742 uint32_t
3743 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3744 {
3745 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3746 	const skb_frag_t *frag = sh->frags + cur_frag;
3747 
3748 	return skb_frag_size(frag);
3749 }
3750 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3751 
3752 /**
3753  * __qdf_nbuf_frag_map() - dma map frag
3754  * @osdev: os device
3755  * @nbuf: sk buff
3756  * @offset: offset
3757  * @dir: direction
3758  * @cur_frag: current fragment
3759  *
3760  * Return: QDF status
3761  */
3762 #ifdef A_SIMOS_DEVHOST
3763 QDF_STATUS __qdf_nbuf_frag_map(
3764 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3765 	int offset, qdf_dma_dir_t dir, int cur_frag)
3766 {
3767 	int32_t paddr, frag_len;
3768 
3769 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3770 	return QDF_STATUS_SUCCESS;
3771 }
3772 qdf_export_symbol(__qdf_nbuf_frag_map);
3773 #else
3774 QDF_STATUS __qdf_nbuf_frag_map(
3775 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3776 	int offset, qdf_dma_dir_t dir, int cur_frag)
3777 {
3778 	dma_addr_t paddr, frag_len;
3779 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3780 	const skb_frag_t *frag = sh->frags + cur_frag;
3781 
3782 	frag_len = skb_frag_size(frag);
3783 
3784 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3785 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3786 					__qdf_dma_dir_to_os(dir));
3787 	return dma_mapping_error(osdev->dev, paddr) ?
3788 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3789 }
3790 qdf_export_symbol(__qdf_nbuf_frag_map);
3791 #endif
3792 /**
3793  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3794  * @dmap: dma map
3795  * @cb: callback
3796  * @arg: argument
3797  *
3798  * Return: none
3799  */
3800 void
3801 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3802 {
3803 	return;
3804 }
3805 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3806 
3807 
3808 /**
3809  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3810  * @osdev: os device
3811  * @buf: sk buff
3812  * @dir: direction
3813  *
3814  * Return: none
3815  */
3816 #if defined(A_SIMOS_DEVHOST)
3817 static void __qdf_nbuf_sync_single_for_cpu(
3818 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3819 {
3820 	return;
3821 }
3822 #else
3823 static void __qdf_nbuf_sync_single_for_cpu(
3824 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3825 {
	if (!QDF_NBUF_CB_PADDR(buf)) {
3827 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3828 		return;
3829 	}
3830 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3831 		skb_end_offset(buf) - skb_headroom(buf),
3832 		__qdf_dma_dir_to_os(dir));
3833 }
3834 #endif
3835 /**
3836  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3837  * @osdev: os device
3838  * @skb: sk buff
3839  * @dir: direction
3840  *
3841  * Return: none
3842  */
3843 void
3844 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3845 	struct sk_buff *skb, qdf_dma_dir_t dir)
3846 {
	qdf_assert((dir == QDF_DMA_TO_DEVICE) ||
		   (dir == QDF_DMA_FROM_DEVICE));
3849 
3850 	/*
3851 	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
3853 	 */
3854 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3855 }
3856 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
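
/*
 * Usage sketch (illustrative only): before the CPU parses a DMA-mapped
 * receive buffer, hand cache ownership back to the CPU. The
 * process_rx_frame() consumer is hypothetical.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	process_rx_frame(skb);
 */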
3857 
3858 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3859 /**
3860  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3861  * @rx_status: Pointer to rx_status.
3862  * @rtap_buf: Buf to which VHT info has to be updated.
3863  * @rtap_len: Current length of radiotap buffer
3864  *
 * Return: Length of radiotap after VHT flags are updated.
3866  */
3867 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3868 					struct mon_rx_status *rx_status,
3869 					int8_t *rtap_buf,
3870 					uint32_t rtap_len)
3871 {
3872 	uint16_t vht_flags = 0;
3873 
3874 	rtap_len = qdf_align(rtap_len, 2);
3875 
3876 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3877 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3878 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3879 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3880 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3881 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3882 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3883 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3884 	rtap_len += 2;
3885 
3886 	rtap_buf[rtap_len] |=
3887 		(rx_status->is_stbc ?
3888 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3889 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3890 		(rx_status->ldpc ?
3891 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3892 		(rx_status->beamformed ?
3893 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3894 	rtap_len += 1;
3895 	switch (rx_status->vht_flag_values2) {
3896 	case IEEE80211_RADIOTAP_VHT_BW_20:
3897 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3898 		break;
3899 	case IEEE80211_RADIOTAP_VHT_BW_40:
3900 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3901 		break;
3902 	case IEEE80211_RADIOTAP_VHT_BW_80:
3903 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3904 		break;
3905 	case IEEE80211_RADIOTAP_VHT_BW_160:
3906 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3907 		break;
3908 	}
3909 	rtap_len += 1;
3910 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3911 	rtap_len += 1;
3912 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3913 	rtap_len += 1;
3914 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3915 	rtap_len += 1;
3916 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3917 	rtap_len += 1;
3918 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3919 	rtap_len += 1;
3920 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3921 	rtap_len += 1;
3922 	put_unaligned_le16(rx_status->vht_flag_values6,
3923 			   &rtap_buf[rtap_len]);
3924 	rtap_len += 2;
3925 
3926 	return rtap_len;
3927 }
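
/*
 * Resulting IEEE80211_RADIOTAP_VHT element layout (12 bytes, written at
 * a 2-byte aligned offset): known(u16) | flags(u8) | bandwidth(u8) |
 * mcs_nss(u8[4]) | coding(u8) | group_id(u8) | partial_aid(u16).
 */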
3928 
3929 /**
 * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header HE flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the high-efficiency (11ax) fields in the radiotap header.
 *
 * Return: length of the radiotap header after the HE fields are updated.
3938  */
3939 static unsigned int
3940 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3941 				     int8_t *rtap_buf, uint32_t rtap_len)
3942 {
3943 	/*
3944 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3945 	 * Enable all "known" HE radiotap flags for now
3946 	 */
3947 	rtap_len = qdf_align(rtap_len, 2);
3948 
3949 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3950 	rtap_len += 2;
3951 
3952 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3953 	rtap_len += 2;
3954 
3955 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3956 	rtap_len += 2;
3957 
3958 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3959 	rtap_len += 2;
3960 
3961 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3962 	rtap_len += 2;
3963 
3964 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3965 	rtap_len += 2;
3966 	qdf_debug("he data %x %x %x %x %x %x",
3967 		  rx_status->he_data1,
3968 		  rx_status->he_data2, rx_status->he_data3,
3969 		  rx_status->he_data4, rx_status->he_data5,
3970 		  rx_status->he_data6);
3971 	return rtap_len;
3972 }
3973 
3974 
3975 /**
3976  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3977  * @rx_status: Pointer to rx_status.
3978  * @rtap_buf: buffer to which radiotap has to be updated
3979  * @rtap_len: radiotap length
3980  *
 * This API updates the HE-MU fields in the radiotap header.
 *
 * Return: length of the radiotap header after the HE-MU fields are updated.
3984  */
3985 static unsigned int
3986 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3987 				     int8_t *rtap_buf, uint32_t rtap_len)
3988 {
3989 	rtap_len = qdf_align(rtap_len, 2);
3990 
3991 	/*
3992 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
3993 	 * Enable all "known" he-mu radiotap flags for now
3994 	 */
3995 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
3996 	rtap_len += 2;
3997 
3998 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
3999 	rtap_len += 2;
4000 
4001 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4002 	rtap_len += 1;
4003 
4004 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4005 	rtap_len += 1;
4006 
4007 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4008 	rtap_len += 1;
4009 
4010 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4011 	rtap_len += 1;
4012 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4013 		  rx_status->he_flags1,
4014 		  rx_status->he_flags2, rx_status->he_RU[0],
4015 		  rx_status->he_RU[1], rx_status->he_RU[2],
4016 		  rx_status->he_RU[3]);
4017 
4018 	return rtap_len;
4019 }
4020 
4021 /**
4022  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4023  * @rx_status: Pointer to rx_status.
4024  * @rtap_buf: buffer to which radiotap has to be updated
4025  * @rtap_len: radiotap length
4026  *
 * This API updates the HE-MU-other fields in the radiotap header.
 *
 * Return: length of the radiotap header after the fields are updated.
4030  */
4031 static unsigned int
4032 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4033 				     int8_t *rtap_buf, uint32_t rtap_len)
4034 {
4035 	rtap_len = qdf_align(rtap_len, 2);
4036 
4037 	/*
4038 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4039 	 * Enable all "known" he-mu-other radiotap flags for now
4040 	 */
4041 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4042 	rtap_len += 2;
4043 
4044 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4045 	rtap_len += 2;
4046 
4047 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4048 	rtap_len += 1;
4049 
4050 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4051 	rtap_len += 1;
4052 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4053 		  rx_status->he_per_user_1,
4054 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4055 		  rx_status->he_per_user_known);
4056 	return rtap_len;
4057 }
4058 
4059 
/*
 * Maximum length budget for the radiotap header: the combined length of
 * the mandatory struct ieee80211_radiotap_header and all optional
 * elements (RADIOTAP_HEADER_LEN) cannot be more than the available
 * headroom_sz. Increase RADIOTAP_HEADER_LEN when more radiotap elements
 * are added. The number after '+' in each element length below is the
 * maximum possible increase due to alignment padding.
 */
4067 
4068 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4069 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4070 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4071 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4072 #define RADIOTAP_FIXED_HEADER_LEN 17
4073 #define RADIOTAP_HT_FLAGS_LEN 3
4074 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4075 #define RADIOTAP_VENDOR_NS_LEN \
4076 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4077 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4078 				RADIOTAP_FIXED_HEADER_LEN + \
4079 				RADIOTAP_HT_FLAGS_LEN + \
4080 				RADIOTAP_VHT_FLAGS_LEN + \
4081 				RADIOTAP_AMPDU_STATUS_LEN + \
4082 				RADIOTAP_HE_FLAGS_LEN + \
4083 				RADIOTAP_HE_MU_FLAGS_LEN + \
4084 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4085 				RADIOTAP_VENDOR_NS_LEN)
4086 
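/*
 * Worked budget (assuming the usual 8-byte struct
 * ieee80211_radiotap_header): 8 + 17 + 3 + 13 + 11 + 13 + 9 + 19 bytes
 * plus the vendor namespace element. A caller must provide at least
 * RADIOTAP_HEADER_LEN bytes of headroom, or qdf_nbuf_update_radiotap()
 * fails and returns 0.
 */
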
4087 #define IEEE80211_RADIOTAP_HE 23
4088 #define IEEE80211_RADIOTAP_HE_MU	24
4089 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4090 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4091 
4092 /**
 * radiotap_num_to_freq() - Get frequency from channel number
 * @chan_num: Input channel number
 *
 * Return: Channel frequency in MHz
 */
static uint16_t radiotap_num_to_freq(uint16_t chan_num)
4099 {
4100 	if (chan_num == CHANNEL_NUM_14)
4101 		return CHANNEL_FREQ_2484;
4102 	if (chan_num < CHANNEL_NUM_14)
4103 		return CHANNEL_FREQ_2407 +
4104 			(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4105 
4106 	if (chan_num < CHANNEL_NUM_27)
4107 		return CHANNEL_FREQ_2512 +
4108 			((chan_num - CHANNEL_NUM_15) *
4109 			 FREQ_MULTIPLIER_CONST_20MHZ);
4110 
4111 	if (chan_num > CHANNEL_NUM_182 &&
4112 			chan_num < CHANNEL_NUM_197)
4113 		return ((chan_num * FREQ_MULTIPLIER_CONST_5MHZ) +
4114 			CHANNEL_FREQ_4000);
4115 
4116 	return CHANNEL_FREQ_5000 +
4117 		(chan_num * FREQ_MULTIPLIER_CONST_5MHZ);
4118 }
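
/*
 * Worked examples of the conversion above:
 *	channel 1   -> 2407 + 1 * 5   = 2412 MHz (2.4 GHz band)
 *	channel 14  -> 2484 MHz (special case)
 *	channel 36  -> 5000 + 36 * 5  = 5180 MHz (5 GHz band)
 *	channel 184 -> 4000 + 184 * 5 = 4920 MHz (4.9 GHz band)
 */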
4119 
4120 /**
4121  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4122  * @rx_status: Pointer to rx_status.
4123  * @rtap_buf: Buf to which AMPDU info has to be updated.
4124  * @rtap_len: Current length of radiotap buffer
4125  *
 * Return: Length of radiotap after AMPDU flags are updated.
4127  */
4128 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4129 					struct mon_rx_status *rx_status,
4130 					uint8_t *rtap_buf,
4131 					uint32_t rtap_len)
4132 {
4133 	/*
4134 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4135 	 * First 32 bits of AMPDU represents the reference number
4136 	 */
4137 
4138 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4139 	uint16_t ampdu_flags = 0;
4140 	uint16_t ampdu_reserved_flags = 0;
4141 
4142 	rtap_len = qdf_align(rtap_len, 4);
4143 
4144 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4145 	rtap_len += 4;
4146 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4147 	rtap_len += 2;
4148 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4149 	rtap_len += 2;
4150 
4151 	return rtap_len;
4152 }
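
/*
 * Resulting IEEE80211_RADIOTAP_AMPDU_STATUS element layout (8 bytes,
 * written at a 4-byte aligned offset): reference(u32) | flags(u16) |
 * delimiter CRC(u8) | reserved(u8); the last two bytes are carried in
 * the reserved flags word here.
 */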
4153 
4154 /**
4155  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4156  * @rx_status: Pointer to rx_status.
4157  * @nbuf:      nbuf pointer to which radiotap has to be updated
4158  * @headroom_sz: Available headroom size.
4159  *
 * Return: length of the radiotap header on success; 0 on failure.
4161  */
4162 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4163 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4164 {
4165 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4166 	struct ieee80211_radiotap_header *rthdr =
4167 		(struct ieee80211_radiotap_header *)rtap_buf;
4168 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4169 	uint32_t rtap_len = rtap_hdr_len;
4170 	uint8_t length = rtap_len;
4171 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4172 
4173 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4174 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4175 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4176 	rtap_len += 8;
4177 
4178 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4179 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4180 
4181 	if (rx_status->rs_fcs_err)
4182 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4183 
4184 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4185 	rtap_len += 1;
4186 
4187 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4188 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4189 	    !rx_status->he_flags) {
4190 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4191 		rtap_buf[rtap_len] = rx_status->rate;
4192 	} else
4193 		rtap_buf[rtap_len] = 0;
4194 	rtap_len += 1;
4195 
4196 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4197 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4198 	rx_status->chan_freq = radiotap_num_to_freq(rx_status->chan_num);
4199 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4200 	rtap_len += 2;
4201 	/* Channel flags. */
4202 	if (rx_status->chan_num > CHANNEL_NUM_35)
4203 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4204 	else
4205 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4206 	if (rx_status->cck_flag)
4207 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4208 	if (rx_status->ofdm_flag)
4209 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4210 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4211 	rtap_len += 2;
4212 
4213 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4214 	 *					(dBm)
4215 	 */
4216 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4217 	/*
	 * rssi_comb is in dB relative to the noise floor; convert it to
	 * dBm by adding the channel noise floor (nominally -96 dBm).
4220 	 */
4221 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4222 	rtap_len += 1;
4223 
4224 	/* RX signal noise floor */
4225 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4226 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4227 	rtap_len += 1;
4228 
4229 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4230 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4231 	rtap_buf[rtap_len] = rx_status->nr_ant;
4232 	rtap_len += 1;
4233 
4234 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4235 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4236 		return 0;
4237 	}
4238 
4239 	if (rx_status->ht_flags) {
4240 		length = rtap_len;
		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4242 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4243 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4244 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4245 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4246 		rtap_len += 1;
4247 
4248 		if (rx_status->sgi)
4249 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4250 		if (rx_status->bw)
4251 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4252 		else
4253 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4254 		rtap_len += 1;
4255 
4256 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4257 		rtap_len += 1;
4258 
4259 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4260 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4261 			return 0;
4262 		}
4263 	}
4264 
4265 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4266 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4267 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4268 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4269 								rtap_buf,
4270 								rtap_len);
4271 	}
4272 
4273 	if (rx_status->vht_flags) {
4274 		length = rtap_len;
4275 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4276 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4277 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4278 								rtap_buf,
4279 								rtap_len);
4280 
4281 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4282 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4283 			return 0;
4284 		}
4285 	}
4286 
4287 	if (rx_status->he_flags) {
4288 		length = rtap_len;
4289 		/* IEEE80211_RADIOTAP_HE */
4290 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4291 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4292 								rtap_buf,
4293 								rtap_len);
4294 
4295 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4296 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4297 			return 0;
4298 		}
4299 	}
4300 
4301 	if (rx_status->he_mu_flags) {
4302 		length = rtap_len;
4303 		/* IEEE80211_RADIOTAP_HE-MU */
4304 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4305 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4306 								rtap_buf,
4307 								rtap_len);
4308 
4309 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4310 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4311 			return 0;
4312 		}
4313 	}
4314 
4315 	if (rx_status->he_mu_other_flags) {
4316 		length = rtap_len;
4317 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4318 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4319 		rtap_len =
4320 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4321 								rtap_buf,
4322 								rtap_len);
4323 
4324 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4325 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4326 			return 0;
4327 		}
4328 	}
4329 
4330 	rtap_len = qdf_align(rtap_len, 2);
4331 	/*
4332 	 * Radiotap Vendor Namespace
4333 	 */
4334 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4335 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4336 					(rtap_buf + rtap_len);
4337 	/*
4338 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4339 	 */
4340 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4341 	/*
4342 	 * Name space selector = 0
4343 	 * We only will have one namespace for now
4344 	 */
4345 	radiotap_vendor_ns_ath->hdr.selector = 0;
4346 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4347 					sizeof(*radiotap_vendor_ns_ath) -
4348 					sizeof(radiotap_vendor_ns_ath->hdr));
4349 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4350 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4351 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4352 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4353 				cpu_to_le32(rx_status->ppdu_timestamp);
4354 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4355 
4356 	rthdr->it_len = cpu_to_le16(rtap_len);
4357 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4358 
4359 	if (headroom_sz < rtap_len) {
4360 		qdf_err("ERROR: not enough space to update radiotap");
4361 		return 0;
4362 	}
4363 	qdf_nbuf_push_head(nbuf, rtap_len);
4364 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4365 	return rtap_len;
4366 }
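
/*
 * Usage sketch (illustrative only): in a monitor-mode rx path, prepend
 * the radiotap header before delivering the frame. rx_status is assumed
 * to have been filled in by the caller.
 *
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf)))
 *		qdf_err("radiotap update failed");
 */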
4367 #else
4368 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4369 					struct mon_rx_status *rx_status,
4370 					int8_t *rtap_buf,
4371 					uint32_t rtap_len)
4372 {
4373 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4374 	return 0;
4375 }
4376 
static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				  int8_t *rtap_buf, uint32_t rtap_len)
4379 {
4380 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4381 	return 0;
4382 }
4383 
4384 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4385 					struct mon_rx_status *rx_status,
4386 					uint8_t *rtap_buf,
4387 					uint32_t rtap_len)
4388 {
4389 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4390 	return 0;
4391 }
4392 
4393 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4394 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4395 {
4396 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4397 	return 0;
4398 }
4399 #endif
4400 qdf_export_symbol(qdf_nbuf_update_radiotap);
4401 
4402 /**
4403  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4404  * @cb_func_ptr: function pointer to the nbuf free callback
4405  *
4406  * This function registers a callback function for nbuf free.
4407  *
4408  * Return: none
4409  */
4410 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4411 {
4412 	nbuf_free_cb = cb_func_ptr;
4413 }
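
/*
 * Usage sketch (illustrative only): a component may hook nbuf free
 * events, e.g. for flow-control accounting. my_nbuf_free_notify() is
 * hypothetical and must match the qdf_nbuf_free_t signature.
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_notify);
 */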
4414 
4415 /**
 * qdf_nbuf_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Sets the broadcast/multicast flags and the packet type
 * (ARP/DHCP/EAPOL/WAPI) in the nbuf control block.
 *
 * Return: none
4420  */
4421 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4422 {
4423 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4424 
4425 	/* check destination mac address is broadcast/multicast */
	if (is_broadcast_ether_addr(eh->h_dest))
		QDF_NBUF_CB_SET_BCAST(skb);
	else if (is_multicast_ether_addr(eh->h_dest))
4429 		QDF_NBUF_CB_SET_MCAST(skb);
4430 
4431 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4432 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4433 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4434 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4435 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4436 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4437 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4438 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4439 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4440 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4441 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4442 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4443 }
4444 qdf_export_symbol(qdf_nbuf_classify_pkt);
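
/*
 * Usage sketch (illustrative only): classify a frame on the tx path and
 * branch on the recorded type; the high-priority queueing is
 * hypothetical.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		send_on_high_priority_queue(skb);
 */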
4445 
4446 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4447 {
4448 	qdf_nbuf_users_set(&nbuf->users, 1);
4449 	nbuf->data = nbuf->head + NET_SKB_PAD;
4450 	skb_reset_tail_pointer(nbuf);
4451 }
4452 qdf_export_symbol(__qdf_nbuf_init);
4453 
4454 #ifdef WLAN_FEATURE_FASTPATH
4455 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4456 {
4457 	qdf_nbuf_users_set(&nbuf->users, 1);
4458 	nbuf->data = nbuf->head + NET_SKB_PAD;
4459 	skb_reset_tail_pointer(nbuf);
4460 }
4461 qdf_export_symbol(qdf_nbuf_init_fast);
4462 #endif /* WLAN_FEATURE_FASTPATH */
4463 
4464 
4465 #ifdef QDF_NBUF_GLOBAL_COUNT
4466 /**
 * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
 *
 * Return: void
4470  */
4471 void __qdf_nbuf_mod_init(void)
4472 {
4473 	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR,
				  NULL, &nbuf_count);
4475 }
4476 
4477 /**
 * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
 *
 * Return: void
4481  */
4482 void __qdf_nbuf_mod_exit(void)
4483 {
4484 }
4485 #endif
4486