xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1) !
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define CHANNEL_FREQ_5150 5150
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
95 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 /* Packet Counter */
110 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
111 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
112 #ifdef QDF_NBUF_GLOBAL_COUNT
113 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
114 static qdf_atomic_t nbuf_count;
115 #endif
116 
117 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
118 static bool is_initial_mem_debug_disabled;
119 #endif
120 
121 /**
122  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
123  *
124  * Return: none
125  */
126 void qdf_nbuf_tx_desc_count_display(void)
127 {
128 	qdf_debug("Current Snapshot of the Driver:");
129 	qdf_debug("Data Packets:");
130 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
132 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
141 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
146 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
147 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
148 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
149 	qdf_debug("Mgmt Packets:");
150 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
161 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
162 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
163 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
164 }
165 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
166 
167 /**
168  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
169  * @packet_type   : packet type either mgmt/data
170  * @current_state : layer at which the packet currently present
171  *
172  * Return: none
173  */
174 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
175 			uint8_t current_state)
176 {
177 	switch (packet_type) {
178 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
179 		nbuf_tx_mgmt[current_state]++;
180 		break;
181 	case QDF_NBUF_TX_PKT_DATA_TRACK:
182 		nbuf_tx_data[current_state]++;
183 		break;
184 	default:
185 		break;
186 	}
187 }
188 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
189 
190 /**
191  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
192  *
193  * Return: none
194  */
195 void qdf_nbuf_tx_desc_count_clear(void)
196 {
197 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
198 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
199 }
200 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
201 
202 /**
203  * qdf_nbuf_set_state() - Updates the packet state
204  * @nbuf:            network buffer
205  * @current_state :  layer at which the packet currently is
206  *
207  * This function updates the packet state to the layer at which the packet
208  * currently is
209  *
210  * Return: none
211  */
212 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
213 {
214 	/*
215 	 * Only Mgmt, Data Packets are tracked. WMI messages
216 	 * such as scan commands are not tracked
217 	 */
218 	uint8_t packet_type;
219 
220 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
221 
222 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
223 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
224 		return;
225 	}
226 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
227 	qdf_nbuf_tx_desc_count_update(packet_type,
228 					current_state);
229 }
230 qdf_export_symbol(qdf_nbuf_set_state);
231 
232 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
233 /**
234  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
235  *
236  * This function starts the alloc fail replenish timer.
237  *
238  * Return: void
239  */
240 static void __qdf_nbuf_start_replenish_timer(void)
241 {
242 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
243 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
244 	    QDF_TIMER_STATE_RUNNING)
245 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
246 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
247 }
248 
249 /**
250  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
251  *
252  * This function stops the alloc fail replenish timer.
253  *
254  * Return: void
255  */
256 static void __qdf_nbuf_stop_replenish_timer(void)
257 {
258 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
259 		return;
260 
261 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
262 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
263 	    QDF_TIMER_STATE_RUNNING)
264 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
265 }
266 
267 /**
268  * qdf_replenish_expire_handler - Replenish expire handler
269  *
270  * This function triggers when the alloc fail replenish timer expires.
271  *
272  * Return: void
273  */
274 static void qdf_replenish_expire_handler(void *arg)
275 {
276 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
277 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
278 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
279 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
280 
281 		/* Error handling here */
282 	}
283 }
284 
285 /**
286  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
287  *
288  * This function initializes the nbuf alloc fail replenish timer.
289  *
290  * Return: void
291  */
292 void __qdf_nbuf_init_replenish_timer(void)
293 {
294 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
295 			  qdf_replenish_expire_handler, NULL);
296 }
297 
298 /**
299  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
300  *
301  * This function deinitializes the nbuf alloc fail replenish timer.
302  *
303  * Return: void
304  */
305 void __qdf_nbuf_deinit_replenish_timer(void)
306 {
307 	__qdf_nbuf_stop_replenish_timer();
308 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
309 }
310 #else
311 
312 static inline void __qdf_nbuf_start_replenish_timer(void) {}
313 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
314 #endif
315 
316 /* globals do not need to be initialized to NULL/0 */
317 qdf_nbuf_trace_update_t qdf_trace_update_cb;
318 qdf_nbuf_free_t nbuf_free_cb;
319 
320 #ifdef QDF_NBUF_GLOBAL_COUNT
321 
322 /**
323  * __qdf_nbuf_count_get() - get nbuf global count
324  *
325  * Return: nbuf global count
326  */
327 int __qdf_nbuf_count_get(void)
328 {
329 	return qdf_atomic_read(&nbuf_count);
330 }
331 qdf_export_symbol(__qdf_nbuf_count_get);
332 
333 /**
334  * __qdf_nbuf_count_inc() - increment nbuf global count
335  *
336  * @buf: sk buff
337  *
338  * Return: void
339  */
340 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
341 {
342 	int num_nbuf = 1;
343 	qdf_nbuf_t ext_list;
344 
345 	if (qdf_likely(is_initial_mem_debug_disabled))
346 		return;
347 
348 	ext_list = qdf_nbuf_get_ext_list(nbuf);
349 
350 	/* Take care to account for frag_list */
351 	while (ext_list) {
352 		++num_nbuf;
353 		ext_list = qdf_nbuf_queue_next(ext_list);
354 	}
355 
356 	qdf_atomic_add(num_nbuf, &nbuf_count);
357 }
358 qdf_export_symbol(__qdf_nbuf_count_inc);
359 
360 /**
361  * __qdf_nbuf_count_dec() - decrement nbuf global count
362  *
363  * @buf: sk buff
364  *
365  * Return: void
366  */
367 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
368 {
369 	qdf_nbuf_t ext_list;
370 	int num_nbuf;
371 
372 	if (qdf_likely(is_initial_mem_debug_disabled))
373 		return;
374 
375 	if (qdf_nbuf_get_users(nbuf) > 1)
376 		return;
377 
378 	num_nbuf = 1;
379 
380 	/* Take care to account for frag_list */
381 	ext_list = qdf_nbuf_get_ext_list(nbuf);
382 	while (ext_list) {
383 		if (qdf_nbuf_get_users(ext_list) == 1)
384 			++num_nbuf;
385 		ext_list = qdf_nbuf_queue_next(ext_list);
386 	}
387 
388 	qdf_atomic_sub(num_nbuf, &nbuf_count);
389 }
390 qdf_export_symbol(__qdf_nbuf_count_dec);
391 #endif
392 
393 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
394 	!defined(QCA_WIFI_QCN9000)
395 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
396 				 int align, int prio, const char *func,
397 				 uint32_t line)
398 {
399 	struct sk_buff *skb;
400 	unsigned long offset;
401 	uint32_t lowmem_alloc_tries = 0;
402 
403 	if (align)
404 		size += (align - 1);
405 
406 realloc:
407 	skb = dev_alloc_skb(size);
408 
409 	if (skb)
410 		goto skb_alloc;
411 
412 	skb = pld_nbuf_pre_alloc(size);
413 
414 	if (!skb) {
415 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
416 				size, func, line);
417 		return NULL;
418 	}
419 
420 skb_alloc:
421 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
422 	 * Though we are trying to reserve low memory upfront to prevent this,
423 	 * we sometimes see SKBs allocated from low memory.
424 	 */
425 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
426 		lowmem_alloc_tries++;
427 		if (lowmem_alloc_tries > 100) {
428 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
429 				     size, func, line);
430 			return NULL;
431 		} else {
432 			/* Not freeing to make sure it
433 			 * will not get allocated again
434 			 */
435 			goto realloc;
436 		}
437 	}
438 	memset(skb->cb, 0x0, sizeof(skb->cb));
439 
440 	/*
441 	 * The default is for netbuf fragments to be interpreted
442 	 * as wordstreams rather than bytestreams.
443 	 */
444 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
445 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
446 
447 	/*
448 	 * XXX:how about we reserve first then align
449 	 * Align & make sure that the tail & data are adjusted properly
450 	 */
451 
452 	if (align) {
453 		offset = ((unsigned long)skb->data) % align;
454 		if (offset)
455 			skb_reserve(skb, align - offset);
456 	}
457 
458 	/*
459 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
460 	 * pointer
461 	 */
462 	skb_reserve(skb, reserve);
463 	qdf_nbuf_count_inc(skb);
464 
465 	return skb;
466 }
467 #else
468 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
469 				 int align, int prio, const char *func,
470 				 uint32_t line)
471 {
472 	struct sk_buff *skb;
473 	unsigned long offset;
474 	int flags = GFP_KERNEL;
475 
476 	if (align)
477 		size += (align - 1);
478 
479 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
480 		flags = GFP_ATOMIC;
481 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
482 		/*
483 		 * Observed that kcompactd burns out CPU to make order-3 page.
484 		 *__netdev_alloc_skb has 4k page fallback option just in case of
485 		 * failing high order page allocation so we don't need to be
486 		 * hard. Make kcompactd rest in piece.
487 		 */
488 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
489 #endif
490 	}
491 
492 	skb = __netdev_alloc_skb(NULL, size, flags);
493 
494 	if (skb)
495 		goto skb_alloc;
496 
497 	skb = pld_nbuf_pre_alloc(size);
498 
499 	if (!skb) {
500 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
501 				size, func, line);
502 		__qdf_nbuf_start_replenish_timer();
503 		return NULL;
504 	} else {
505 		__qdf_nbuf_stop_replenish_timer();
506 	}
507 
508 skb_alloc:
509 	memset(skb->cb, 0x0, sizeof(skb->cb));
510 
511 	/*
512 	 * The default is for netbuf fragments to be interpreted
513 	 * as wordstreams rather than bytestreams.
514 	 */
515 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
516 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
517 
518 	/*
519 	 * XXX:how about we reserve first then align
520 	 * Align & make sure that the tail & data are adjusted properly
521 	 */
522 
523 	if (align) {
524 		offset = ((unsigned long)skb->data) % align;
525 		if (offset)
526 			skb_reserve(skb, align - offset);
527 	}
528 
529 	/*
530 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
531 	 * pointer
532 	 */
533 	skb_reserve(skb, reserve);
534 	qdf_nbuf_count_inc(skb);
535 
536 	return skb;
537 }
538 #endif
539 qdf_export_symbol(__qdf_nbuf_alloc);
540 
541 /**
542  * __qdf_nbuf_free() - free the nbuf its interrupt safe
543  * @skb: Pointer to network buffer
544  *
545  * Return: none
546  */
547 
548 void __qdf_nbuf_free(struct sk_buff *skb)
549 {
550 	if (pld_nbuf_pre_alloc_free(skb))
551 		return;
552 
553 	qdf_nbuf_count_dec(skb);
554 	if (nbuf_free_cb)
555 		nbuf_free_cb(skb);
556 	else
557 		dev_kfree_skb_any(skb);
558 }
559 
560 qdf_export_symbol(__qdf_nbuf_free);
561 
562 #ifdef NBUF_MEMORY_DEBUG
563 enum qdf_nbuf_event_type {
564 	QDF_NBUF_ALLOC,
565 	QDF_NBUF_ALLOC_CLONE,
566 	QDF_NBUF_ALLOC_COPY,
567 	QDF_NBUF_ALLOC_FAILURE,
568 	QDF_NBUF_FREE,
569 	QDF_NBUF_MAP,
570 	QDF_NBUF_UNMAP,
571 	QDF_NBUF_ALLOC_COPY_EXPAND,
572 };
573 
574 struct qdf_nbuf_event {
575 	qdf_nbuf_t nbuf;
576 	char func[QDF_MEM_FUNC_NAME_SIZE];
577 	uint32_t line;
578 	enum qdf_nbuf_event_type type;
579 	uint64_t timestamp;
580 };
581 
582 #define QDF_NBUF_HISTORY_SIZE 4096
583 static qdf_atomic_t qdf_nbuf_history_index;
584 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
585 
586 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
587 {
588 	int32_t next = qdf_atomic_inc_return(index);
589 
590 	if (next == size)
591 		qdf_atomic_sub(size, index);
592 
593 	return next % size;
594 }
595 
596 static void
597 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
598 		     enum qdf_nbuf_event_type type)
599 {
600 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
601 						   QDF_NBUF_HISTORY_SIZE);
602 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
603 
604 	event->nbuf = nbuf;
605 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
606 	event->line = line;
607 	event->type = type;
608 	event->timestamp = qdf_get_log_timestamp();
609 }
610 #endif /* NBUF_MEMORY_DEBUG */
611 
612 #ifdef NBUF_MAP_UNMAP_DEBUG
613 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
614 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
615 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
616 
617 static void qdf_nbuf_map_tracking_init(void)
618 {
619 	qdf_tracker_init(&qdf_nbuf_map_tracker);
620 }
621 
622 static void qdf_nbuf_map_tracking_deinit(void)
623 {
624 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
625 }
626 
627 static QDF_STATUS
628 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
629 {
630 	QDF_STATUS status;
631 
632 	if (is_initial_mem_debug_disabled)
633 		return QDF_STATUS_SUCCESS;
634 
635 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
636 	if (QDF_IS_STATUS_ERROR(status))
637 		return status;
638 
639 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
640 
641 	return QDF_STATUS_SUCCESS;
642 }
643 
644 static void
645 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
646 {
647 	if (is_initial_mem_debug_disabled)
648 		return;
649 
650 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
651 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
652 }
653 
654 void qdf_nbuf_map_check_for_leaks(void)
655 {
656 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
657 }
658 
659 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
660 			      qdf_nbuf_t buf,
661 			      qdf_dma_dir_t dir,
662 			      const char *func,
663 			      uint32_t line)
664 {
665 	QDF_STATUS status;
666 
667 	status = qdf_nbuf_track_map(buf, func, line);
668 	if (QDF_IS_STATUS_ERROR(status))
669 		return status;
670 
671 	status = __qdf_nbuf_map(osdev, buf, dir);
672 	if (QDF_IS_STATUS_ERROR(status))
673 		qdf_nbuf_untrack_map(buf, func, line);
674 
675 	return status;
676 }
677 
678 qdf_export_symbol(qdf_nbuf_map_debug);
679 
680 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
681 			  qdf_nbuf_t buf,
682 			  qdf_dma_dir_t dir,
683 			  const char *func,
684 			  uint32_t line)
685 {
686 	qdf_nbuf_untrack_map(buf, func, line);
687 	__qdf_nbuf_unmap_single(osdev, buf, dir);
688 }
689 
690 qdf_export_symbol(qdf_nbuf_unmap_debug);
691 
692 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
693 				     qdf_nbuf_t buf,
694 				     qdf_dma_dir_t dir,
695 				     const char *func,
696 				     uint32_t line)
697 {
698 	QDF_STATUS status;
699 
700 	status = qdf_nbuf_track_map(buf, func, line);
701 	if (QDF_IS_STATUS_ERROR(status))
702 		return status;
703 
704 	status = __qdf_nbuf_map_single(osdev, buf, dir);
705 	if (QDF_IS_STATUS_ERROR(status))
706 		qdf_nbuf_untrack_map(buf, func, line);
707 
708 	return status;
709 }
710 
711 qdf_export_symbol(qdf_nbuf_map_single_debug);
712 
713 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
714 				 qdf_nbuf_t buf,
715 				 qdf_dma_dir_t dir,
716 				 const char *func,
717 				 uint32_t line)
718 {
719 	qdf_nbuf_untrack_map(buf, func, line);
720 	__qdf_nbuf_unmap_single(osdev, buf, dir);
721 }
722 
723 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
724 
725 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
726 				     qdf_nbuf_t buf,
727 				     qdf_dma_dir_t dir,
728 				     int nbytes,
729 				     const char *func,
730 				     uint32_t line)
731 {
732 	QDF_STATUS status;
733 
734 	status = qdf_nbuf_track_map(buf, func, line);
735 	if (QDF_IS_STATUS_ERROR(status))
736 		return status;
737 
738 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
739 	if (QDF_IS_STATUS_ERROR(status))
740 		qdf_nbuf_untrack_map(buf, func, line);
741 
742 	return status;
743 }
744 
745 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
746 
747 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
748 				 qdf_nbuf_t buf,
749 				 qdf_dma_dir_t dir,
750 				 int nbytes,
751 				 const char *func,
752 				 uint32_t line)
753 {
754 	qdf_nbuf_untrack_map(buf, func, line);
755 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
756 }
757 
758 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
759 
760 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
761 					    qdf_nbuf_t buf,
762 					    qdf_dma_dir_t dir,
763 					    int nbytes,
764 					    const char *func,
765 					    uint32_t line)
766 {
767 	QDF_STATUS status;
768 
769 	status = qdf_nbuf_track_map(buf, func, line);
770 	if (QDF_IS_STATUS_ERROR(status))
771 		return status;
772 
773 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
774 	if (QDF_IS_STATUS_ERROR(status))
775 		qdf_nbuf_untrack_map(buf, func, line);
776 
777 	return status;
778 }
779 
780 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
781 
782 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
783 					qdf_nbuf_t buf,
784 					qdf_dma_dir_t dir,
785 					int nbytes,
786 					const char *func,
787 					uint32_t line)
788 {
789 	qdf_nbuf_untrack_map(buf, func, line);
790 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
791 }
792 
793 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
794 
795 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
796 					     const char *func,
797 					     uint32_t line)
798 {
799 	char map_func[QDF_TRACKER_FUNC_SIZE];
800 	uint32_t map_line;
801 
802 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
803 				&map_func, &map_line))
804 		return;
805 
806 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
807 			   func, line, map_func, map_line);
808 }
809 #else
810 static inline void qdf_nbuf_map_tracking_init(void)
811 {
812 }
813 
814 static inline void qdf_nbuf_map_tracking_deinit(void)
815 {
816 }
817 
818 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
819 						    const char *func,
820 						    uint32_t line)
821 {
822 }
823 #endif /* NBUF_MAP_UNMAP_DEBUG */
824 
825 /**
826  * __qdf_nbuf_map() - map a buffer to local bus address space
827  * @osdev: OS device
828  * @bmap: Bitmap
829  * @skb: Pointer to network buffer
830  * @dir: Direction
831  *
832  * Return: QDF_STATUS
833  */
834 #ifdef QDF_OS_DEBUG
835 QDF_STATUS
836 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
837 {
838 	struct skb_shared_info *sh = skb_shinfo(skb);
839 
840 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
841 			|| (dir == QDF_DMA_FROM_DEVICE));
842 
843 	/*
844 	 * Assume there's only a single fragment.
845 	 * To support multiple fragments, it would be necessary to change
846 	 * qdf_nbuf_t to be a separate object that stores meta-info
847 	 * (including the bus address for each fragment) and a pointer
848 	 * to the underlying sk_buff.
849 	 */
850 	qdf_assert(sh->nr_frags == 0);
851 
852 	return __qdf_nbuf_map_single(osdev, skb, dir);
853 }
854 qdf_export_symbol(__qdf_nbuf_map);
855 
856 #else
857 QDF_STATUS
858 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
859 {
860 	return __qdf_nbuf_map_single(osdev, skb, dir);
861 }
862 qdf_export_symbol(__qdf_nbuf_map);
863 #endif
864 /**
865  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
866  * @osdev: OS device
867  * @skb: Pointer to network buffer
868  * @dir: dma direction
869  *
870  * Return: none
871  */
872 void
873 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
874 			qdf_dma_dir_t dir)
875 {
876 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
877 		   || (dir == QDF_DMA_FROM_DEVICE));
878 
879 	/*
880 	 * Assume there's a single fragment.
881 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
882 	 */
883 	__qdf_nbuf_unmap_single(osdev, skb, dir);
884 }
885 qdf_export_symbol(__qdf_nbuf_unmap);
886 
887 /**
888  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
889  * @osdev: OS device
890  * @skb: Pointer to network buffer
891  * @dir: Direction
892  *
893  * Return: QDF_STATUS
894  */
895 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
896 QDF_STATUS
897 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
898 {
899 	qdf_dma_addr_t paddr;
900 
901 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
902 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
903 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
904 	return QDF_STATUS_SUCCESS;
905 }
906 qdf_export_symbol(__qdf_nbuf_map_single);
907 #else
908 QDF_STATUS
909 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
910 {
911 	qdf_dma_addr_t paddr;
912 
913 	/* assume that the OS only provides a single fragment */
914 	QDF_NBUF_CB_PADDR(buf) = paddr =
915 		dma_map_single(osdev->dev, buf->data,
916 				skb_end_pointer(buf) - buf->data,
917 				__qdf_dma_dir_to_os(dir));
918 	return dma_mapping_error(osdev->dev, paddr)
919 		? QDF_STATUS_E_FAILURE
920 		: QDF_STATUS_SUCCESS;
921 }
922 qdf_export_symbol(__qdf_nbuf_map_single);
923 #endif
924 /**
925  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
926  * @osdev: OS device
927  * @skb: Pointer to network buffer
928  * @dir: Direction
929  *
930  * Return: none
931  */
932 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
933 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
934 				qdf_dma_dir_t dir)
935 {
936 }
937 #else
938 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
939 					qdf_dma_dir_t dir)
940 {
941 	if (QDF_NBUF_CB_PADDR(buf))
942 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
943 			skb_end_pointer(buf) - buf->data,
944 			__qdf_dma_dir_to_os(dir));
945 }
946 #endif
947 qdf_export_symbol(__qdf_nbuf_unmap_single);
948 
949 /**
950  * __qdf_nbuf_set_rx_cksum() - set rx checksum
951  * @skb: Pointer to network buffer
952  * @cksum: Pointer to checksum value
953  *
954  * Return: QDF_STATUS
955  */
956 QDF_STATUS
957 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
958 {
959 	switch (cksum->l4_result) {
960 	case QDF_NBUF_RX_CKSUM_NONE:
961 		skb->ip_summed = CHECKSUM_NONE;
962 		break;
963 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
964 		skb->ip_summed = CHECKSUM_UNNECESSARY;
965 		break;
966 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
967 		skb->ip_summed = CHECKSUM_PARTIAL;
968 		skb->csum = cksum->val;
969 		break;
970 	default:
971 		pr_err("Unknown checksum type\n");
972 		qdf_assert(0);
973 		return QDF_STATUS_E_NOSUPPORT;
974 	}
975 	return QDF_STATUS_SUCCESS;
976 }
977 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
978 
979 /**
980  * __qdf_nbuf_get_tx_cksum() - get tx checksum
981  * @skb: Pointer to network buffer
982  *
983  * Return: TX checksum value
984  */
985 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
986 {
987 	switch (skb->ip_summed) {
988 	case CHECKSUM_NONE:
989 		return QDF_NBUF_TX_CKSUM_NONE;
990 	case CHECKSUM_PARTIAL:
991 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
992 	case CHECKSUM_COMPLETE:
993 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
994 	default:
995 		return QDF_NBUF_TX_CKSUM_NONE;
996 	}
997 }
998 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
999 
1000 /**
1001  * __qdf_nbuf_get_tid() - get tid
1002  * @skb: Pointer to network buffer
1003  *
1004  * Return: tid
1005  */
1006 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1007 {
1008 	return skb->priority;
1009 }
1010 qdf_export_symbol(__qdf_nbuf_get_tid);
1011 
1012 /**
1013  * __qdf_nbuf_set_tid() - set tid
1014  * @skb: Pointer to network buffer
1015  *
1016  * Return: none
1017  */
1018 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1019 {
1020 	skb->priority = tid;
1021 }
1022 qdf_export_symbol(__qdf_nbuf_set_tid);
1023 
1024 /**
1025  * __qdf_nbuf_set_tid() - set tid
1026  * @skb: Pointer to network buffer
1027  *
1028  * Return: none
1029  */
1030 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1031 {
1032 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1033 }
1034 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1035 
1036 /**
1037  * __qdf_nbuf_reg_trace_cb() - register trace callback
1038  * @cb_func_ptr: Pointer to trace callback function
1039  *
1040  * Return: none
1041  */
1042 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1043 {
1044 	qdf_trace_update_cb = cb_func_ptr;
1045 }
1046 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1047 
1048 /**
1049  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1050  *              of DHCP packet.
1051  * @data: Pointer to DHCP packet data buffer
1052  *
1053  * This func. returns the subtype of DHCP packet.
1054  *
1055  * Return: subtype of the DHCP packet.
1056  */
1057 enum qdf_proto_subtype
1058 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1059 {
1060 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1061 
1062 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1063 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1064 					QDF_DHCP_OPTION53_LENGTH)) {
1065 
1066 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1067 		case QDF_DHCP_DISCOVER:
1068 			subtype = QDF_PROTO_DHCP_DISCOVER;
1069 			break;
1070 		case QDF_DHCP_REQUEST:
1071 			subtype = QDF_PROTO_DHCP_REQUEST;
1072 			break;
1073 		case QDF_DHCP_OFFER:
1074 			subtype = QDF_PROTO_DHCP_OFFER;
1075 			break;
1076 		case QDF_DHCP_ACK:
1077 			subtype = QDF_PROTO_DHCP_ACK;
1078 			break;
1079 		case QDF_DHCP_NAK:
1080 			subtype = QDF_PROTO_DHCP_NACK;
1081 			break;
1082 		case QDF_DHCP_RELEASE:
1083 			subtype = QDF_PROTO_DHCP_RELEASE;
1084 			break;
1085 		case QDF_DHCP_INFORM:
1086 			subtype = QDF_PROTO_DHCP_INFORM;
1087 			break;
1088 		case QDF_DHCP_DECLINE:
1089 			subtype = QDF_PROTO_DHCP_DECLINE;
1090 			break;
1091 		default:
1092 			break;
1093 		}
1094 	}
1095 
1096 	return subtype;
1097 }
1098 
1099 /**
1100  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1101  *            of EAPOL packet.
1102  * @data: Pointer to EAPOL packet data buffer
1103  *
1104  * This func. returns the subtype of EAPOL packet.
1105  *
1106  * Return: subtype of the EAPOL packet.
1107  */
1108 enum qdf_proto_subtype
1109 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1110 {
1111 	uint16_t eapol_key_info;
1112 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1113 	uint16_t mask;
1114 
1115 	eapol_key_info = (uint16_t)(*(uint16_t *)
1116 			(data + EAPOL_KEY_INFO_OFFSET));
1117 
1118 	mask = eapol_key_info & EAPOL_MASK;
1119 	switch (mask) {
1120 	case EAPOL_M1_BIT_MASK:
1121 		subtype = QDF_PROTO_EAPOL_M1;
1122 		break;
1123 	case EAPOL_M2_BIT_MASK:
1124 		subtype = QDF_PROTO_EAPOL_M2;
1125 		break;
1126 	case EAPOL_M3_BIT_MASK:
1127 		subtype = QDF_PROTO_EAPOL_M3;
1128 		break;
1129 	case EAPOL_M4_BIT_MASK:
1130 		subtype = QDF_PROTO_EAPOL_M4;
1131 		break;
1132 	default:
1133 		break;
1134 	}
1135 
1136 	return subtype;
1137 }
1138 
1139 /**
1140  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1141  *            of ARP packet.
1142  * @data: Pointer to ARP packet data buffer
1143  *
1144  * This func. returns the subtype of ARP packet.
1145  *
1146  * Return: subtype of the ARP packet.
1147  */
1148 enum qdf_proto_subtype
1149 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1150 {
1151 	uint16_t subtype;
1152 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1153 
1154 	subtype = (uint16_t)(*(uint16_t *)
1155 			(data + ARP_SUB_TYPE_OFFSET));
1156 
1157 	switch (QDF_SWAP_U16(subtype)) {
1158 	case ARP_REQUEST:
1159 		proto_subtype = QDF_PROTO_ARP_REQ;
1160 		break;
1161 	case ARP_RESPONSE:
1162 		proto_subtype = QDF_PROTO_ARP_RES;
1163 		break;
1164 	default:
1165 		break;
1166 	}
1167 
1168 	return proto_subtype;
1169 }
1170 
1171 /**
1172  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1173  *            of IPV4 ICMP packet.
1174  * @data: Pointer to IPV4 ICMP packet data buffer
1175  *
1176  * This func. returns the subtype of ICMP packet.
1177  *
1178  * Return: subtype of the ICMP packet.
1179  */
1180 enum qdf_proto_subtype
1181 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1182 {
1183 	uint8_t subtype;
1184 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1185 
1186 	subtype = (uint8_t)(*(uint8_t *)
1187 			(data + ICMP_SUBTYPE_OFFSET));
1188 
1189 	switch (subtype) {
1190 	case ICMP_REQUEST:
1191 		proto_subtype = QDF_PROTO_ICMP_REQ;
1192 		break;
1193 	case ICMP_RESPONSE:
1194 		proto_subtype = QDF_PROTO_ICMP_RES;
1195 		break;
1196 	default:
1197 		break;
1198 	}
1199 
1200 	return proto_subtype;
1201 }
1202 
1203 /**
1204  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1205  *            of IPV6 ICMPV6 packet.
1206  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1207  *
1208  * This func. returns the subtype of ICMPV6 packet.
1209  *
1210  * Return: subtype of the ICMPV6 packet.
1211  */
1212 enum qdf_proto_subtype
1213 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1214 {
1215 	uint8_t subtype;
1216 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1217 
1218 	subtype = (uint8_t)(*(uint8_t *)
1219 			(data + ICMPV6_SUBTYPE_OFFSET));
1220 
1221 	switch (subtype) {
1222 	case ICMPV6_REQUEST:
1223 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1224 		break;
1225 	case ICMPV6_RESPONSE:
1226 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1227 		break;
1228 	case ICMPV6_RS:
1229 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1230 		break;
1231 	case ICMPV6_RA:
1232 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1233 		break;
1234 	case ICMPV6_NS:
1235 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1236 		break;
1237 	case ICMPV6_NA:
1238 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1239 		break;
1240 	default:
1241 		break;
1242 	}
1243 
1244 	return proto_subtype;
1245 }
1246 
1247 /**
1248  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1249  *            of IPV4 packet.
1250  * @data: Pointer to IPV4 packet data buffer
1251  *
1252  * This func. returns the proto type of IPV4 packet.
1253  *
1254  * Return: proto type of IPV4 packet.
1255  */
1256 uint8_t
1257 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1258 {
1259 	uint8_t proto_type;
1260 
1261 	proto_type = (uint8_t)(*(uint8_t *)(data +
1262 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1263 	return proto_type;
1264 }
1265 
1266 /**
1267  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1268  *            of IPV6 packet.
1269  * @data: Pointer to IPV6 packet data buffer
1270  *
1271  * This func. returns the proto type of IPV6 packet.
1272  *
1273  * Return: proto type of IPV6 packet.
1274  */
1275 uint8_t
1276 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1277 {
1278 	uint8_t proto_type;
1279 
1280 	proto_type = (uint8_t)(*(uint8_t *)(data +
1281 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1282 	return proto_type;
1283 }
1284 
1285 /**
1286  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1287  * @data: Pointer to network data
1288  *
1289  * This api is for Tx packets.
1290  *
1291  * Return: true if packet is ipv4 packet
1292  *	   false otherwise
1293  */
1294 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1295 {
1296 	uint16_t ether_type;
1297 
1298 	ether_type = (uint16_t)(*(uint16_t *)(data +
1299 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1300 
1301 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1302 		return true;
1303 	else
1304 		return false;
1305 }
1306 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1307 
1308 /**
1309  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1310  * @data: Pointer to network data buffer
1311  *
1312  * This api is for ipv4 packet.
1313  *
1314  * Return: true if packet is DHCP packet
1315  *	   false otherwise
1316  */
1317 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1318 {
1319 	uint16_t sport;
1320 	uint16_t dport;
1321 
1322 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1323 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1324 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1325 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1326 					 sizeof(uint16_t)));
1327 
1328 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1329 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1330 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1331 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1332 		return true;
1333 	else
1334 		return false;
1335 }
1336 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1337 
1338 /**
1339  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1340  * @data: Pointer to network data buffer
1341  *
1342  * This api is for ipv4 packet.
1343  *
1344  * Return: true if packet is EAPOL packet
1345  *	   false otherwise.
1346  */
1347 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1348 {
1349 	uint16_t ether_type;
1350 
1351 	ether_type = (uint16_t)(*(uint16_t *)(data +
1352 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1353 
1354 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1355 		return true;
1356 	else
1357 		return false;
1358 }
1359 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1360 
1361 /**
1362  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1363  * @skb: Pointer to network buffer
1364  *
1365  * This api is for ipv4 packet.
1366  *
1367  * Return: true if packet is WAPI packet
1368  *	   false otherwise.
1369  */
1370 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1371 {
1372 	uint16_t ether_type;
1373 
1374 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1375 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1376 
1377 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1378 		return true;
1379 	else
1380 		return false;
1381 }
1382 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1383 
1384 /**
1385  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1386  * @skb: Pointer to network buffer
1387  *
1388  * This api is for ipv4 packet.
1389  *
1390  * Return: true if packet is tdls packet
1391  *	   false otherwise.
1392  */
1393 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1394 {
1395 	uint16_t ether_type;
1396 
1397 	ether_type = *(uint16_t *)(skb->data +
1398 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1399 
1400 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1401 		return true;
1402 	else
1403 		return false;
1404 }
1405 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1406 
1407 /**
1408  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1409  * @data: Pointer to network data buffer
1410  *
1411  * This api is for ipv4 packet.
1412  *
1413  * Return: true if packet is ARP packet
1414  *	   false otherwise.
1415  */
1416 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1417 {
1418 	uint16_t ether_type;
1419 
1420 	ether_type = (uint16_t)(*(uint16_t *)(data +
1421 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1422 
1423 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1424 		return true;
1425 	else
1426 		return false;
1427 }
1428 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1429 
1430 /**
1431  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1432  * @data: Pointer to network data buffer
1433  *
1434  * This api is for ipv4 packet.
1435  *
1436  * Return: true if packet is ARP request
1437  *	   false otherwise.
1438  */
1439 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1440 {
1441 	uint16_t op_code;
1442 
1443 	op_code = (uint16_t)(*(uint16_t *)(data +
1444 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1445 
1446 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1447 		return true;
1448 	return false;
1449 }
1450 
1451 /**
1452  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1453  * @data: Pointer to network data buffer
1454  *
1455  * This api is for ipv4 packet.
1456  *
1457  * Return: true if packet is ARP response
1458  *	   false otherwise.
1459  */
1460 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1461 {
1462 	uint16_t op_code;
1463 
1464 	op_code = (uint16_t)(*(uint16_t *)(data +
1465 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1466 
1467 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1468 		return true;
1469 	return false;
1470 }
1471 
1472 /**
1473  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1474  * @data: Pointer to network data buffer
1475  *
1476  * This api is for ipv4 packet.
1477  *
1478  * Return: ARP packet source IP value.
1479  */
1480 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1481 {
1482 	uint32_t src_ip;
1483 
1484 	src_ip = (uint32_t)(*(uint32_t *)(data +
1485 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1486 
1487 	return src_ip;
1488 }
1489 
1490 /**
1491  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1492  * @data: Pointer to network data buffer
1493  *
1494  * This api is for ipv4 packet.
1495  *
1496  * Return: ARP packet target IP value.
1497  */
1498 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1499 {
1500 	uint32_t tgt_ip;
1501 
1502 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1503 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1504 
1505 	return tgt_ip;
1506 }
1507 
1508 /**
1509  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1510  * @data: Pointer to network data buffer
1511  * @len: length to copy
1512  *
1513  * This api is for dns domain name
1514  *
1515  * Return: dns domain name.
1516  */
1517 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1518 {
1519 	uint8_t *domain_name;
1520 
1521 	domain_name = (uint8_t *)
1522 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1523 	return domain_name;
1524 }
1525 
1526 
1527 /**
1528  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1529  * @data: Pointer to network data buffer
1530  *
1531  * This api is for dns query packet.
1532  *
1533  * Return: true if packet is dns query packet.
1534  *	   false otherwise.
1535  */
1536 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1537 {
1538 	uint16_t op_code;
1539 	uint16_t tgt_port;
1540 
1541 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1542 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1543 	/* Standard DNS query always happen on Dest Port 53. */
1544 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1545 		op_code = (uint16_t)(*(uint16_t *)(data +
1546 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1547 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1548 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1549 			return true;
1550 	}
1551 	return false;
1552 }
1553 
1554 /**
1555  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1556  * @data: Pointer to network data buffer
1557  *
1558  * This api is for dns query response.
1559  *
1560  * Return: true if packet is dns response packet.
1561  *	   false otherwise.
1562  */
1563 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1564 {
1565 	uint16_t op_code;
1566 	uint16_t src_port;
1567 
1568 	src_port = (uint16_t)(*(uint16_t *)(data +
1569 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1570 	/* Standard DNS response always comes on Src Port 53. */
1571 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1572 		op_code = (uint16_t)(*(uint16_t *)(data +
1573 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1574 
1575 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1576 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1577 			return true;
1578 	}
1579 	return false;
1580 }
1581 
1582 /**
1583  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1584  * @data: Pointer to network data buffer
1585  *
1586  * This api is for tcp syn packet.
1587  *
1588  * Return: true if packet is tcp syn packet.
1589  *	   false otherwise.
1590  */
1591 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1592 {
1593 	uint8_t op_code;
1594 
1595 	op_code = (uint8_t)(*(uint8_t *)(data +
1596 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1597 
1598 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1599 		return true;
1600 	return false;
1601 }
1602 
1603 /**
1604  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1605  * @data: Pointer to network data buffer
1606  *
1607  * This api is for tcp syn ack packet.
1608  *
1609  * Return: true if packet is tcp syn ack packet.
1610  *	   false otherwise.
1611  */
1612 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1613 {
1614 	uint8_t op_code;
1615 
1616 	op_code = (uint8_t)(*(uint8_t *)(data +
1617 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1618 
1619 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1620 		return true;
1621 	return false;
1622 }
1623 
1624 /**
1625  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1626  * @data: Pointer to network data buffer
1627  *
1628  * This api is for tcp ack packet.
1629  *
1630  * Return: true if packet is tcp ack packet.
1631  *	   false otherwise.
1632  */
1633 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1634 {
1635 	uint8_t op_code;
1636 
1637 	op_code = (uint8_t)(*(uint8_t *)(data +
1638 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1639 
1640 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1641 		return true;
1642 	return false;
1643 }
1644 
1645 /**
1646  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1647  * @data: Pointer to network data buffer
1648  *
1649  * This api is for tcp packet.
1650  *
1651  * Return: tcp source port value.
1652  */
1653 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1654 {
1655 	uint16_t src_port;
1656 
1657 	src_port = (uint16_t)(*(uint16_t *)(data +
1658 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1659 
1660 	return src_port;
1661 }
1662 
1663 /**
1664  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1665  * @data: Pointer to network data buffer
1666  *
1667  * This api is for tcp packet.
1668  *
1669  * Return: tcp destination port value.
1670  */
1671 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1672 {
1673 	uint16_t tgt_port;
1674 
1675 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1676 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1677 
1678 	return tgt_port;
1679 }
1680 
1681 /**
1682  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1683  * @data: Pointer to network data buffer
1684  *
1685  * This api is for ipv4 req packet.
1686  *
1687  * Return: true if packet is icmpv4 request
1688  *	   false otherwise.
1689  */
1690 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1691 {
1692 	uint8_t op_code;
1693 
1694 	op_code = (uint8_t)(*(uint8_t *)(data +
1695 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1696 
1697 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1698 		return true;
1699 	return false;
1700 }
1701 
1702 /**
1703  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1704  * @data: Pointer to network data buffer
1705  *
1706  * This api is for ipv4 res packet.
1707  *
1708  * Return: true if packet is icmpv4 response
1709  *	   false otherwise.
1710  */
1711 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1712 {
1713 	uint8_t op_code;
1714 
1715 	op_code = (uint8_t)(*(uint8_t *)(data +
1716 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1717 
1718 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1719 		return true;
1720 	return false;
1721 }
1722 
1723 /**
1724  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1725  * @data: Pointer to network data buffer
1726  *
1727  * This api is for ipv4 packet.
1728  *
1729  * Return: icmpv4 packet source IP value.
1730  */
1731 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1732 {
1733 	uint32_t src_ip;
1734 
1735 	src_ip = (uint32_t)(*(uint32_t *)(data +
1736 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1737 
1738 	return src_ip;
1739 }
1740 
1741 /**
1742  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1743  * @data: Pointer to network data buffer
1744  *
1745  * This api is for ipv4 packet.
1746  *
1747  * Return: icmpv4 packet target IP value.
1748  */
1749 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1750 {
1751 	uint32_t tgt_ip;
1752 
1753 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1754 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1755 
1756 	return tgt_ip;
1757 }
1758 
1759 
1760 /**
1761  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1762  * @data: Pointer to IPV6 packet data buffer
1763  *
1764  * This func. checks whether it is a IPV6 packet or not.
1765  *
1766  * Return: TRUE if it is a IPV6 packet
1767  *         FALSE if not
1768  */
1769 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1770 {
1771 	uint16_t ether_type;
1772 
1773 	ether_type = (uint16_t)(*(uint16_t *)(data +
1774 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1775 
1776 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1777 		return true;
1778 	else
1779 		return false;
1780 }
1781 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1782 
1783 /**
1784  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1785  * @data: Pointer to network data buffer
1786  *
1787  * This api is for ipv6 packet.
1788  *
1789  * Return: true if packet is DHCP packet
1790  *	   false otherwise
1791  */
1792 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1793 {
1794 	uint16_t sport;
1795 	uint16_t dport;
1796 
1797 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1798 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1799 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1800 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1801 					sizeof(uint16_t));
1802 
1803 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1804 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1805 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1806 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1807 		return true;
1808 	else
1809 		return false;
1810 }
1811 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1812 
1813 /**
1814  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
1815  * @data: Pointer to network data buffer
1816  *
1817  * This api is for ipv6 packet.
1818  *
1819  * Return: true if packet is MDNS packet
1820  *	   false otherwise
1821  */
1822 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
1823 {
1824 	uint16_t sport;
1825 	uint16_t dport;
1826 
1827 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1828 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1829 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1830 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1831 					sizeof(uint16_t));
1832 
1833 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
1834 	    dport == sport)
1835 		return true;
1836 	else
1837 		return false;
1838 }
1839 
1840 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
1841 
1842 /**
1843  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
1844  * @data: Pointer to IPV4 packet data buffer
1845  *
1846  * This func. checks whether it is a IPV4 multicast packet or not.
1847  *
1848  * Return: TRUE if it is a IPV4 multicast packet
1849  *         FALSE if not
1850  */
1851 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1852 {
1853 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1854 		uint32_t *dst_addr =
1855 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1856 
1857 		/*
1858 		 * Check first word of the IPV4 address and if it is
1859 		 * equal to 0xE then it represents multicast IP.
1860 		 */
1861 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1862 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1863 			return true;
1864 		else
1865 			return false;
1866 	} else
1867 		return false;
1868 }
1869 
1870 /**
1871  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
1872  * @data: Pointer to IPV6 packet data buffer
1873  *
1874  * This func. checks whether it is a IPV6 multicast packet or not.
1875  *
1876  * Return: TRUE if it is a IPV6 multicast packet
1877  *         FALSE if not
1878  */
1879 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1880 {
1881 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1882 		uint16_t *dst_addr;
1883 
1884 		dst_addr = (uint16_t *)
1885 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1886 
1887 		/*
1888 		 * Check first byte of the IP address and if it
1889 		 * 0xFF00 then it is a IPV6 mcast packet.
1890 		 */
1891 		if (*dst_addr ==
1892 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1893 			return true;
1894 		else
1895 			return false;
1896 	} else
1897 		return false;
1898 }
1899 
1900 /**
1901  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
1902  * @data: Pointer to IPV4 ICMP packet data buffer
1903  *
1904  * This func. checks whether it is a ICMP packet or not.
1905  *
1906  * Return: TRUE if it is a ICMP packet
1907  *         FALSE if not
1908  */
1909 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1910 {
1911 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1912 		uint8_t pkt_type;
1913 
1914 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1915 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1916 
1917 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1918 			return true;
1919 		else
1920 			return false;
1921 	} else
1922 		return false;
1923 }
1924 
1925 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
1926 
1927 /**
1928  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
1929  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1930  *
1931  * This func. checks whether it is a ICMPV6 packet or not.
1932  *
1933  * Return: TRUE if it is a ICMPV6 packet
1934  *         FALSE if not
1935  */
1936 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1937 {
1938 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1939 		uint8_t pkt_type;
1940 
1941 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1942 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1943 
1944 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1945 			return true;
1946 		else
1947 			return false;
1948 	} else
1949 		return false;
1950 }
1951 
1952 /**
1953  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1954  * @data: Pointer to IPV4 UDP packet data buffer
1955  *
1956  * This func. checks whether it is a IPV4 UDP packet or not.
1957  *
1958  * Return: TRUE if it is a IPV4 UDP packet
1959  *         FALSE if not
1960  */
1961 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
1962 {
1963 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1964 		uint8_t pkt_type;
1965 
1966 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1967 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1968 
1969 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
1970 			return true;
1971 		else
1972 			return false;
1973 	} else
1974 		return false;
1975 }
1976 
1977 /**
1978  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
1979  * @data: Pointer to IPV4 TCP packet data buffer
1980  *
1981  * This func. checks whether it is a IPV4 TCP packet or not.
1982  *
1983  * Return: TRUE if it is a IPV4 TCP packet
1984  *         FALSE if not
1985  */
1986 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
1987 {
1988 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1989 		uint8_t pkt_type;
1990 
1991 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1992 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1993 
1994 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
1995 			return true;
1996 		else
1997 			return false;
1998 	} else
1999 		return false;
2000 }
2001 
2002 /**
2003  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2004  * @data: Pointer to IPV6 UDP packet data buffer
2005  *
2006  * This func. checks whether it is a IPV6 UDP packet or not.
2007  *
2008  * Return: TRUE if it is a IPV6 UDP packet
2009  *         FALSE if not
2010  */
2011 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2012 {
2013 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2014 		uint8_t pkt_type;
2015 
2016 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2017 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2018 
2019 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2020 			return true;
2021 		else
2022 			return false;
2023 	} else
2024 		return false;
2025 }
2026 
2027 /**
2028  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2029  * @data: Pointer to IPV6 TCP packet data buffer
2030  *
2031  * This func. checks whether it is a IPV6 TCP packet or not.
2032  *
2033  * Return: TRUE if it is a IPV6 TCP packet
2034  *         FALSE if not
2035  */
2036 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2037 {
2038 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2039 		uint8_t pkt_type;
2040 
2041 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2042 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2043 
2044 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2045 			return true;
2046 		else
2047 			return false;
2048 	} else
2049 		return false;
2050 }
2051 
2052 /**
2053  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2054  * @nbuf - sk buff
2055  *
2056  * Return: true if packet is broadcast
2057  *	   false otherwise
2058  */
2059 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2060 {
2061 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2062 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2063 }
2064 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2065 
2066 #ifdef NBUF_MEMORY_DEBUG
2067 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2068 
2069 /**
2070  * struct qdf_nbuf_track_t - Network buffer track structure
2071  *
2072  * @p_next: Pointer to next
2073  * @net_buf: Pointer to network buffer
2074  * @func_name: Function name
2075  * @line_num: Line number
2076  * @size: Size
2077  */
2078 struct qdf_nbuf_track_t {
2079 	struct qdf_nbuf_track_t *p_next;
2080 	qdf_nbuf_t net_buf;
2081 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2082 	uint32_t line_num;
2083 	size_t size;
2084 };
2085 
2086 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2087 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2088 
2089 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2090 static struct kmem_cache *nbuf_tracking_cache;
2091 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2092 static spinlock_t qdf_net_buf_track_free_list_lock;
2093 static uint32_t qdf_net_buf_track_free_list_count;
2094 static uint32_t qdf_net_buf_track_used_list_count;
2095 static uint32_t qdf_net_buf_track_max_used;
2096 static uint32_t qdf_net_buf_track_max_free;
2097 static uint32_t qdf_net_buf_track_max_allocated;
2098 
2099 /**
2100  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2101  *
2102  * tracks the max number of network buffers that the wlan driver was tracking
2103  * at any one time.
2104  *
2105  * Return: none
2106  */
2107 static inline void update_max_used(void)
2108 {
2109 	int sum;
2110 
2111 	if (qdf_net_buf_track_max_used <
2112 	    qdf_net_buf_track_used_list_count)
2113 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2114 	sum = qdf_net_buf_track_free_list_count +
2115 		qdf_net_buf_track_used_list_count;
2116 	if (qdf_net_buf_track_max_allocated < sum)
2117 		qdf_net_buf_track_max_allocated = sum;
2118 }
2119 
2120 /**
2121  * update_max_free() - update qdf_net_buf_track_free_list_count
2122  *
2123  * tracks the max number tracking buffers kept in the freelist.
2124  *
2125  * Return: none
2126  */
2127 static inline void update_max_free(void)
2128 {
2129 	if (qdf_net_buf_track_max_free <
2130 	    qdf_net_buf_track_free_list_count)
2131 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2132 }
2133 
2134 /**
2135  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2136  *
2137  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2138  * This function also ads fexibility to adjust the allocation and freelist
2139  * scheems.
2140  *
2141  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2142  */
2143 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2144 {
2145 	int flags = GFP_KERNEL;
2146 	unsigned long irq_flag;
2147 	QDF_NBUF_TRACK *new_node = NULL;
2148 
2149 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2150 	qdf_net_buf_track_used_list_count++;
2151 	if (qdf_net_buf_track_free_list) {
2152 		new_node = qdf_net_buf_track_free_list;
2153 		qdf_net_buf_track_free_list =
2154 			qdf_net_buf_track_free_list->p_next;
2155 		qdf_net_buf_track_free_list_count--;
2156 	}
2157 	update_max_used();
2158 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2159 
2160 	if (new_node)
2161 		return new_node;
2162 
2163 	if (in_interrupt() || irqs_disabled() || in_atomic())
2164 		flags = GFP_ATOMIC;
2165 
2166 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2167 }
2168 
2169 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2170 #define FREEQ_POOLSIZE 2048
2171 
2172 /**
2173  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2174  *
2175  * Matches calls to qdf_nbuf_track_alloc.
2176  * Either frees the tracking cookie to kernel or an internal
2177  * freelist based on the size of the freelist.
2178  *
2179  * Return: none
2180  */
2181 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2182 {
2183 	unsigned long irq_flag;
2184 
2185 	if (!node)
2186 		return;
2187 
2188 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2189 	 * only shrink the freelist if it is bigger than twice the number of
2190 	 * nbufs in use. If the driver is stalling in a consistent bursty
2191 	 * fasion, this will keep 3/4 of thee allocations from the free list
2192 	 * while also allowing the system to recover memory as less frantic
2193 	 * traffic occurs.
2194 	 */
2195 
2196 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2197 
2198 	qdf_net_buf_track_used_list_count--;
2199 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2200 	   (qdf_net_buf_track_free_list_count >
2201 	    qdf_net_buf_track_used_list_count << 1)) {
2202 		kmem_cache_free(nbuf_tracking_cache, node);
2203 	} else {
2204 		node->p_next = qdf_net_buf_track_free_list;
2205 		qdf_net_buf_track_free_list = node;
2206 		qdf_net_buf_track_free_list_count++;
2207 	}
2208 	update_max_free();
2209 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2210 }
2211 
2212 /**
2213  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2214  *
2215  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2216  * the freelist first makes it performant for the first iperf udp burst
2217  * as well as steady state.
2218  *
2219  * Return: None
2220  */
2221 static void qdf_nbuf_track_prefill(void)
2222 {
2223 	int i;
2224 	QDF_NBUF_TRACK *node, *head;
2225 
2226 	/* prepopulate the freelist */
2227 	head = NULL;
2228 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2229 		node = qdf_nbuf_track_alloc();
2230 		if (!node)
2231 			continue;
2232 		node->p_next = head;
2233 		head = node;
2234 	}
2235 	while (head) {
2236 		node = head->p_next;
2237 		qdf_nbuf_track_free(head);
2238 		head = node;
2239 	}
2240 
2241 	/* prefilled buffers should not count as used */
2242 	qdf_net_buf_track_max_used = 0;
2243 }
2244 
2245 /**
2246  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2247  *
2248  * This initializes the memory manager for the nbuf tracking cookies.  Because
2249  * these cookies are all the same size and only used in this feature, we can
2250  * use a kmem_cache to provide tracking as well as to speed up allocations.
2251  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2252  * features) a freelist is prepopulated here.
2253  *
2254  * Return: None
2255  */
2256 static void qdf_nbuf_track_memory_manager_create(void)
2257 {
2258 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2259 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2260 						sizeof(QDF_NBUF_TRACK),
2261 						0, 0, NULL);
2262 
2263 	qdf_nbuf_track_prefill();
2264 }
2265 
2266 /**
2267  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2268  *
2269  * Empty the freelist and print out usage statistics when it is no longer
2270  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2271  * any nbuf tracking cookies were leaked.
2272  *
2273  * Return: None
2274  */
2275 static void qdf_nbuf_track_memory_manager_destroy(void)
2276 {
2277 	QDF_NBUF_TRACK *node, *tmp;
2278 	unsigned long irq_flag;
2279 
2280 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2281 	node = qdf_net_buf_track_free_list;
2282 
2283 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2284 		qdf_print("%s: unexpectedly large max_used count %d",
2285 			  __func__, qdf_net_buf_track_max_used);
2286 
2287 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2288 		qdf_print("%s: %d unused trackers were allocated",
2289 			  __func__,
2290 			  qdf_net_buf_track_max_allocated -
2291 			  qdf_net_buf_track_max_used);
2292 
2293 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2294 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2295 		qdf_print("%s: check freelist shrinking functionality",
2296 			  __func__);
2297 
2298 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2299 		  "%s: %d residual freelist size",
2300 		  __func__, qdf_net_buf_track_free_list_count);
2301 
2302 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2303 		  "%s: %d max freelist size observed",
2304 		  __func__, qdf_net_buf_track_max_free);
2305 
2306 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2307 		  "%s: %d max buffers used observed",
2308 		  __func__, qdf_net_buf_track_max_used);
2309 
2310 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2311 		  "%s: %d max buffers allocated observed",
2312 		  __func__, qdf_net_buf_track_max_allocated);
2313 
2314 	while (node) {
2315 		tmp = node;
2316 		node = node->p_next;
2317 		kmem_cache_free(nbuf_tracking_cache, tmp);
2318 		qdf_net_buf_track_free_list_count--;
2319 	}
2320 
2321 	if (qdf_net_buf_track_free_list_count != 0)
2322 		qdf_info("%d unfreed tracking memory lost in freelist",
2323 			 qdf_net_buf_track_free_list_count);
2324 
2325 	if (qdf_net_buf_track_used_list_count != 0)
2326 		qdf_info("%d unfreed tracking memory still in use",
2327 			 qdf_net_buf_track_used_list_count);
2328 
2329 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2330 	kmem_cache_destroy(nbuf_tracking_cache);
2331 	qdf_net_buf_track_free_list = NULL;
2332 }
2333 
2334 /**
2335  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2336  *
2337  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2338  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2339  * WLAN driver module whose allocated SKB is freed by network stack are
2340  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2341  * reported as memory leak.
2342  *
2343  * Return: none
2344  */
2345 void qdf_net_buf_debug_init(void)
2346 {
2347 	uint32_t i;
2348 
2349 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2350 
2351 	if (is_initial_mem_debug_disabled)
2352 		return;
2353 
2354 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2355 
2356 	qdf_nbuf_map_tracking_init();
2357 	qdf_nbuf_track_memory_manager_create();
2358 
2359 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2360 		gp_qdf_net_buf_track_tbl[i] = NULL;
2361 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2362 	}
2363 }
2364 qdf_export_symbol(qdf_net_buf_debug_init);
2365 
2366 /**
2367  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2368  *
2369  * Exit network buffer tracking debug functionality and log SKB memory leaks
2370  * As part of exiting the functionality, free the leaked memory and
2371  * cleanup the tracking buffers.
2372  *
2373  * Return: none
2374  */
2375 void qdf_net_buf_debug_exit(void)
2376 {
2377 	uint32_t i;
2378 	uint32_t count = 0;
2379 	unsigned long irq_flag;
2380 	QDF_NBUF_TRACK *p_node;
2381 	QDF_NBUF_TRACK *p_prev;
2382 
2383 	if (is_initial_mem_debug_disabled)
2384 		return;
2385 
2386 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2387 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2388 		p_node = gp_qdf_net_buf_track_tbl[i];
2389 		while (p_node) {
2390 			p_prev = p_node;
2391 			p_node = p_node->p_next;
2392 			count++;
2393 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2394 				 p_prev->func_name, p_prev->line_num,
2395 				 p_prev->size, p_prev->net_buf);
2396 			qdf_nbuf_track_free(p_prev);
2397 		}
2398 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2399 	}
2400 
2401 	qdf_nbuf_track_memory_manager_destroy();
2402 	qdf_nbuf_map_tracking_deinit();
2403 
2404 #ifdef CONFIG_HALT_KMEMLEAK
2405 	if (count) {
2406 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2407 		QDF_BUG(0);
2408 	}
2409 #endif
2410 }
2411 qdf_export_symbol(qdf_net_buf_debug_exit);
2412 
2413 /**
2414  * qdf_net_buf_debug_hash() - hash network buffer pointer
2415  *
2416  * Return: hash value
2417  */
2418 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2419 {
2420 	uint32_t i;
2421 
2422 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2423 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2424 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2425 
2426 	return i;
2427 }
2428 
2429 /**
2430  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2431  *
2432  * Return: If skb is found in hash table then return pointer to network buffer
2433  *	else return %NULL
2434  */
2435 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2436 {
2437 	uint32_t i;
2438 	QDF_NBUF_TRACK *p_node;
2439 
2440 	i = qdf_net_buf_debug_hash(net_buf);
2441 	p_node = gp_qdf_net_buf_track_tbl[i];
2442 
2443 	while (p_node) {
2444 		if (p_node->net_buf == net_buf)
2445 			return p_node;
2446 		p_node = p_node->p_next;
2447 	}
2448 
2449 	return NULL;
2450 }
2451 
2452 /**
2453  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2454  *
2455  * Return: none
2456  */
2457 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2458 				const char *func_name, uint32_t line_num)
2459 {
2460 	uint32_t i;
2461 	unsigned long irq_flag;
2462 	QDF_NBUF_TRACK *p_node;
2463 	QDF_NBUF_TRACK *new_node;
2464 
2465 	if (is_initial_mem_debug_disabled)
2466 		return;
2467 
2468 	new_node = qdf_nbuf_track_alloc();
2469 
2470 	i = qdf_net_buf_debug_hash(net_buf);
2471 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2472 
2473 	p_node = qdf_net_buf_debug_look_up(net_buf);
2474 
2475 	if (p_node) {
2476 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2477 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2478 			  net_buf, func_name, line_num);
2479 		qdf_nbuf_track_free(new_node);
2480 	} else {
2481 		p_node = new_node;
2482 		if (p_node) {
2483 			p_node->net_buf = net_buf;
2484 			qdf_str_lcopy(p_node->func_name, func_name,
2485 				      QDF_MEM_FUNC_NAME_SIZE);
2486 			p_node->line_num = line_num;
2487 			p_node->size = size;
2488 			qdf_mem_skb_inc(size);
2489 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2490 			gp_qdf_net_buf_track_tbl[i] = p_node;
2491 		} else
2492 			qdf_print(
2493 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2494 				  func_name, line_num, size);
2495 	}
2496 
2497 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2498 }
2499 qdf_export_symbol(qdf_net_buf_debug_add_node);
2500 
2501 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2502 				   uint32_t line_num)
2503 {
2504 	uint32_t i;
2505 	unsigned long irq_flag;
2506 	QDF_NBUF_TRACK *p_node;
2507 
2508 	if (is_initial_mem_debug_disabled)
2509 		return;
2510 
2511 	i = qdf_net_buf_debug_hash(net_buf);
2512 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2513 
2514 	p_node = qdf_net_buf_debug_look_up(net_buf);
2515 
2516 	if (p_node) {
2517 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2518 			      QDF_MEM_FUNC_NAME_SIZE);
2519 		p_node->line_num = line_num;
2520 	}
2521 
2522 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2523 }
2524 
2525 qdf_export_symbol(qdf_net_buf_debug_update_node);
2526 
2527 /**
2528  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2529  *
2530  * Return: none
2531  */
2532 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2533 {
2534 	uint32_t i;
2535 	QDF_NBUF_TRACK *p_head;
2536 	QDF_NBUF_TRACK *p_node = NULL;
2537 	unsigned long irq_flag;
2538 	QDF_NBUF_TRACK *p_prev;
2539 
2540 	if (is_initial_mem_debug_disabled)
2541 		return;
2542 
2543 	i = qdf_net_buf_debug_hash(net_buf);
2544 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2545 
2546 	p_head = gp_qdf_net_buf_track_tbl[i];
2547 
2548 	/* Unallocated SKB */
2549 	if (!p_head)
2550 		goto done;
2551 
2552 	p_node = p_head;
2553 	/* Found at head of the table */
2554 	if (p_head->net_buf == net_buf) {
2555 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2556 		goto done;
2557 	}
2558 
2559 	/* Search in collision list */
2560 	while (p_node) {
2561 		p_prev = p_node;
2562 		p_node = p_node->p_next;
2563 		if ((p_node) && (p_node->net_buf == net_buf)) {
2564 			p_prev->p_next = p_node->p_next;
2565 			break;
2566 		}
2567 	}
2568 
2569 done:
2570 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2571 
2572 	if (p_node) {
2573 		qdf_mem_skb_dec(p_node->size);
2574 		qdf_nbuf_track_free(p_node);
2575 	} else {
2576 		qdf_print("Unallocated buffer ! Double free of net_buf %pK ?",
2577 			  net_buf);
2578 		QDF_BUG(0);
2579 	}
2580 }
2581 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2582 
2583 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2584 				   const char *func_name, uint32_t line_num)
2585 {
2586 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2587 
2588 	if (is_initial_mem_debug_disabled)
2589 		return;
2590 
2591 	while (ext_list) {
2592 		/*
2593 		 * Take care to add if it is Jumbo packet connected using
2594 		 * frag_list
2595 		 */
2596 		qdf_nbuf_t next;
2597 
2598 		next = qdf_nbuf_queue_next(ext_list);
2599 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2600 		ext_list = next;
2601 	}
2602 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2603 }
2604 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2605 
2606 /**
2607  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2608  * @net_buf: Network buf holding head segment (single)
2609  *
2610  * WLAN driver module whose allocated SKB is freed by network stack are
2611  * suppose to call this API before returning SKB to network stack such
2612  * that the SKB is not reported as memory leak.
2613  *
2614  * Return: none
2615  */
2616 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2617 {
2618 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2619 
2620 	if (is_initial_mem_debug_disabled)
2621 		return;
2622 
2623 	while (ext_list) {
2624 		/*
2625 		 * Take care to free if it is Jumbo packet connected using
2626 		 * frag_list
2627 		 */
2628 		qdf_nbuf_t next;
2629 
2630 		next = qdf_nbuf_queue_next(ext_list);
2631 
2632 		if (qdf_nbuf_get_users(ext_list) > 1) {
2633 			ext_list = next;
2634 			continue;
2635 		}
2636 
2637 		qdf_net_buf_debug_delete_node(ext_list);
2638 		ext_list = next;
2639 	}
2640 
2641 	if (qdf_nbuf_get_users(net_buf) > 1)
2642 		return;
2643 
2644 	qdf_net_buf_debug_delete_node(net_buf);
2645 }
2646 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2647 
2648 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2649 				int reserve, int align, int prio,
2650 				const char *func, uint32_t line)
2651 {
2652 	qdf_nbuf_t nbuf;
2653 
2654 	if (is_initial_mem_debug_disabled)
2655 		return __qdf_nbuf_alloc(osdev, size,
2656 					reserve, align,
2657 					prio, func, line);
2658 
2659 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2660 
2661 	/* Store SKB in internal QDF tracking table */
2662 	if (qdf_likely(nbuf)) {
2663 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2664 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2665 	} else {
2666 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2667 	}
2668 
2669 	return nbuf;
2670 }
2671 qdf_export_symbol(qdf_nbuf_alloc_debug);
2672 
2673 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2674 {
2675 	qdf_nbuf_t ext_list;
2676 
2677 	if (qdf_unlikely(!nbuf))
2678 		return;
2679 
2680 	if (is_initial_mem_debug_disabled)
2681 		goto free_buf;
2682 
2683 	if (qdf_nbuf_get_users(nbuf) > 1)
2684 		goto free_buf;
2685 
2686 	/* Remove SKB from internal QDF tracking table */
2687 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2688 	qdf_net_buf_debug_delete_node(nbuf);
2689 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2690 
2691 	/* Take care to delete the debug entries for frag_list */
2692 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2693 	while (ext_list) {
2694 		if (qdf_nbuf_get_users(ext_list) == 1) {
2695 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2696 			qdf_net_buf_debug_delete_node(ext_list);
2697 		}
2698 
2699 		ext_list = qdf_nbuf_queue_next(ext_list);
2700 	}
2701 
2702 free_buf:
2703 	__qdf_nbuf_free(nbuf);
2704 }
2705 qdf_export_symbol(qdf_nbuf_free_debug);
2706 
2707 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2708 {
2709 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2710 
2711 	if (is_initial_mem_debug_disabled)
2712 		return cloned_buf;
2713 
2714 	if (qdf_unlikely(!cloned_buf))
2715 		return NULL;
2716 
2717 	/* Store SKB in internal QDF tracking table */
2718 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2719 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2720 
2721 	return cloned_buf;
2722 }
2723 qdf_export_symbol(qdf_nbuf_clone_debug);
2724 
2725 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2726 {
2727 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2728 
2729 	if (is_initial_mem_debug_disabled)
2730 		return copied_buf;
2731 
2732 	if (qdf_unlikely(!copied_buf))
2733 		return NULL;
2734 
2735 	/* Store SKB in internal QDF tracking table */
2736 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2737 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2738 
2739 	return copied_buf;
2740 }
2741 qdf_export_symbol(qdf_nbuf_copy_debug);
2742 
2743 qdf_nbuf_t
2744 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
2745 			   const char *func, uint32_t line)
2746 {
2747 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
2748 
2749 	if (qdf_unlikely(!copied_buf))
2750 		return NULL;
2751 
2752 	if (is_initial_mem_debug_disabled)
2753 		return copied_buf;
2754 
2755 	/* Store SKB in internal QDF tracking table */
2756 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2757 	qdf_nbuf_history_add(copied_buf, func, line,
2758 			     QDF_NBUF_ALLOC_COPY_EXPAND);
2759 
2760 	return copied_buf;
2761 }
2762 
2763 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
2764 
2765 #endif /* NBUF_MEMORY_DEBUG */
2766 
2767 #if defined(FEATURE_TSO)
2768 
2769 /**
2770  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2771  *
2772  * @ethproto: ethernet type of the msdu
2773  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2774  * @l2_len: L2 length for the msdu
2775  * @eit_hdr: pointer to EIT header
2776  * @eit_hdr_len: EIT header length for the msdu
2777  * @eit_hdr_dma_map_addr: dma addr for EIT header
2778  * @tcphdr: pointer to tcp header
2779  * @ipv4_csum_en: ipv4 checksum enable
2780  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2781  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2782  * @ip_id: IP id
2783  * @tcp_seq_num: TCP sequence number
2784  *
2785  * This structure holds the TSO common info that is common
2786  * across all the TCP segments of the jumbo packet.
2787  */
2788 struct qdf_tso_cmn_seg_info_t {
2789 	uint16_t ethproto;
2790 	uint16_t ip_tcp_hdr_len;
2791 	uint16_t l2_len;
2792 	uint8_t *eit_hdr;
2793 	uint32_t eit_hdr_len;
2794 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2795 	struct tcphdr *tcphdr;
2796 	uint16_t ipv4_csum_en;
2797 	uint16_t tcp_ipv4_csum_en;
2798 	uint16_t tcp_ipv6_csum_en;
2799 	uint16_t ip_id;
2800 	uint32_t tcp_seq_num;
2801 };
2802 
2803 /**
2804  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2805  * information
2806  * @osdev: qdf device handle
2807  * @skb: skb buffer
2808  * @tso_info: Parameters common to all segements
2809  *
2810  * Get the TSO information that is common across all the TCP
2811  * segments of the jumbo packet
2812  *
2813  * Return: 0 - success 1 - failure
2814  */
2815 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
2816 			struct sk_buff *skb,
2817 			struct qdf_tso_cmn_seg_info_t *tso_info)
2818 {
2819 	/* Get ethernet type and ethernet header length */
2820 	tso_info->ethproto = vlan_get_protocol(skb);
2821 
2822 	/* Determine whether this is an IPv4 or IPv6 packet */
2823 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
2824 		/* for IPv4, get the IP ID and enable TCP and IP csum */
2825 		struct iphdr *ipv4_hdr = ip_hdr(skb);
2826 
2827 		tso_info->ip_id = ntohs(ipv4_hdr->id);
2828 		tso_info->ipv4_csum_en = 1;
2829 		tso_info->tcp_ipv4_csum_en = 1;
2830 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
2831 			qdf_err("TSO IPV4 proto 0x%x not TCP",
2832 				ipv4_hdr->protocol);
2833 			return 1;
2834 		}
2835 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
2836 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
2837 		tso_info->tcp_ipv6_csum_en = 1;
2838 	} else {
2839 		qdf_err("TSO: ethertype 0x%x is not supported!",
2840 			tso_info->ethproto);
2841 		return 1;
2842 	}
2843 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
2844 	tso_info->tcphdr = tcp_hdr(skb);
2845 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
2846 	/* get pointer to the ethernet + IP + TCP header and their length */
2847 	tso_info->eit_hdr = skb->data;
2848 	tso_info->eit_hdr_len = (skb_transport_header(skb)
2849 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2850 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
2851 							tso_info->eit_hdr,
2852 							tso_info->eit_hdr_len,
2853 							DMA_TO_DEVICE);
2854 	if (unlikely(dma_mapping_error(osdev->dev,
2855 				       tso_info->eit_hdr_dma_map_addr))) {
2856 		qdf_err("DMA mapping error!");
2857 		qdf_assert(0);
2858 		return 1;
2859 	}
2860 
2861 	if (tso_info->ethproto == htons(ETH_P_IP)) {
2862 		/* inlcude IPv4 header length for IPV4 (total length) */
2863 		tso_info->ip_tcp_hdr_len =
2864 			tso_info->eit_hdr_len - tso_info->l2_len;
2865 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
2866 		/* exclude IPv6 header length for IPv6 (payload length) */
2867 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
2868 	}
2869 	/*
2870 	 * The length of the payload (application layer data) is added to
2871 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
2872 	 * descriptor.
2873 	 */
2874 
2875 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
2876 		tso_info->tcp_seq_num,
2877 		tso_info->eit_hdr_len,
2878 		tso_info->l2_len,
2879 		skb->len);
2880 	return 0;
2881 }
2882 
2883 
2884 /**
2885  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
2886  *
2887  * @curr_seg: Segment whose contents are initialized
2888  * @tso_cmn_info: Parameters common to all segements
2889  *
2890  * Return: None
2891  */
2892 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
2893 				struct qdf_tso_seg_elem_t *curr_seg,
2894 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
2895 {
2896 	/* Initialize the flags to 0 */
2897 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
2898 
2899 	/*
2900 	 * The following fields remain the same across all segments of
2901 	 * a jumbo packet
2902 	 */
2903 	curr_seg->seg.tso_flags.tso_enable = 1;
2904 	curr_seg->seg.tso_flags.ipv4_checksum_en =
2905 		tso_cmn_info->ipv4_csum_en;
2906 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
2907 		tso_cmn_info->tcp_ipv6_csum_en;
2908 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
2909 		tso_cmn_info->tcp_ipv4_csum_en;
2910 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
2911 
2912 	/* The following fields change for the segments */
2913 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
2914 	tso_cmn_info->ip_id++;
2915 
2916 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
2917 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
2918 	curr_seg->seg.tso_flags.psh = tso_cmn_info->tcphdr->psh;
2919 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
2920 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
2921 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
2922 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
2923 
2924 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
2925 
2926 	/*
2927 	 * First fragment for each segment always contains the ethernet,
2928 	 * IP and TCP header
2929 	 */
2930 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
2931 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
2932 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
2933 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
2934 
2935 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
2936 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
2937 		   tso_cmn_info->eit_hdr_len,
2938 		   curr_seg->seg.tso_flags.tcp_seq_num,
2939 		   curr_seg->seg.total_len);
2940 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
2941 }
2942 
2943 /**
2944  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
2945  * into segments
2946  * @nbuf: network buffer to be segmented
2947  * @tso_info: This is the output. The information about the
2948  *           TSO segments will be populated within this.
2949  *
2950  * This function fragments a TCP jumbo packet into smaller
2951  * segments to be transmitted by the driver. It chains the TSO
2952  * segments created into a list.
2953  *
2954  * Return: number of TSO segments
2955  */
2956 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
2957 		struct qdf_tso_info_t *tso_info)
2958 {
2959 	/* common across all segments */
2960 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
2961 	/* segment specific */
2962 	void *tso_frag_vaddr;
2963 	qdf_dma_addr_t tso_frag_paddr = 0;
2964 	uint32_t num_seg = 0;
2965 	struct qdf_tso_seg_elem_t *curr_seg;
2966 	struct qdf_tso_num_seg_elem_t *total_num_seg;
2967 	skb_frag_t *frag = NULL;
2968 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
2969 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
2970 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
2971 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
2972 	int j = 0; /* skb fragment index */
2973 
2974 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
2975 	total_num_seg = tso_info->tso_num_seg_list;
2976 	curr_seg = tso_info->tso_seg_list;
2977 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
2978 
2979 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
2980 						skb, &tso_cmn_info))) {
2981 		qdf_warn("TSO: error getting common segment info");
2982 		return 0;
2983 	}
2984 
2985 	/* length of the first chunk of data in the skb */
2986 	skb_frag_len = skb_headlen(skb);
2987 
2988 	/* the 0th tso segment's 0th fragment always contains the EIT header */
2989 	/* update the remaining skb fragment length and TSO segment length */
2990 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
2991 	skb_proc -= tso_cmn_info.eit_hdr_len;
2992 
2993 	/* get the address to the next tso fragment */
2994 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
2995 	/* get the length of the next tso fragment */
2996 	tso_frag_len = min(skb_frag_len, tso_seg_size);
2997 
2998 	if (tso_frag_len != 0) {
2999 		tso_frag_paddr = dma_map_single(osdev->dev,
3000 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
3001 	}
3002 
3003 	if (unlikely(dma_mapping_error(osdev->dev,
3004 					tso_frag_paddr))) {
3005 		qdf_err("DMA mapping error!");
3006 		qdf_assert(0);
3007 		return 0;
3008 	}
3009 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3010 		__LINE__, skb_frag_len, tso_frag_len);
3011 	num_seg = tso_info->num_segs;
3012 	tso_info->num_segs = 0;
3013 	tso_info->is_tso = 1;
3014 
3015 	while (num_seg && curr_seg) {
3016 		int i = 1; /* tso fragment index */
3017 		uint8_t more_tso_frags = 1;
3018 
3019 		curr_seg->seg.num_frags = 0;
3020 		tso_info->num_segs++;
3021 		total_num_seg->num_seg.tso_cmn_num_seg++;
3022 
3023 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3024 						 &tso_cmn_info);
3025 
3026 		if (unlikely(skb_proc == 0))
3027 			return tso_info->num_segs;
3028 
3029 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3030 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3031 		/* frag len is added to ip_len in while loop below*/
3032 
3033 		curr_seg->seg.num_frags++;
3034 
3035 		while (more_tso_frags) {
3036 			if (tso_frag_len != 0) {
3037 				curr_seg->seg.tso_frags[i].vaddr =
3038 					tso_frag_vaddr;
3039 				curr_seg->seg.tso_frags[i].length =
3040 					tso_frag_len;
3041 				curr_seg->seg.total_len += tso_frag_len;
3042 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3043 				curr_seg->seg.num_frags++;
3044 				skb_proc = skb_proc - tso_frag_len;
3045 
3046 				/* increment the TCP sequence number */
3047 
3048 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3049 				curr_seg->seg.tso_frags[i].paddr =
3050 					tso_frag_paddr;
3051 			}
3052 
3053 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3054 					__func__, __LINE__,
3055 					i,
3056 					tso_frag_len,
3057 					curr_seg->seg.total_len,
3058 					curr_seg->seg.tso_frags[i].vaddr);
3059 
3060 			/* if there is no more data left in the skb */
3061 			if (!skb_proc)
3062 				return tso_info->num_segs;
3063 
3064 			/* get the next payload fragment information */
3065 			/* check if there are more fragments in this segment */
3066 			if (tso_frag_len < tso_seg_size) {
3067 				more_tso_frags = 1;
3068 				if (tso_frag_len != 0) {
3069 					tso_seg_size = tso_seg_size -
3070 						tso_frag_len;
3071 					i++;
3072 					if (curr_seg->seg.num_frags ==
3073 								FRAG_NUM_MAX) {
3074 						more_tso_frags = 0;
3075 						/*
3076 						 * reset i and the tso
3077 						 * payload size
3078 						 */
3079 						i = 1;
3080 						tso_seg_size =
3081 							skb_shinfo(skb)->
3082 								gso_size;
3083 					}
3084 				}
3085 			} else {
3086 				more_tso_frags = 0;
3087 				/* reset i and the tso payload size */
3088 				i = 1;
3089 				tso_seg_size = skb_shinfo(skb)->gso_size;
3090 			}
3091 
3092 			/* if the next fragment is contiguous */
3093 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3094 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3095 				skb_frag_len = skb_frag_len - tso_frag_len;
3096 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3097 
3098 			} else { /* the next fragment is not contiguous */
3099 				if (skb_shinfo(skb)->nr_frags == 0) {
3100 					qdf_info("TSO: nr_frags == 0!");
3101 					qdf_assert(0);
3102 					return 0;
3103 				}
3104 				if (j >= skb_shinfo(skb)->nr_frags) {
3105 					qdf_info("TSO: nr_frags %d j %d",
3106 						 skb_shinfo(skb)->nr_frags, j);
3107 					qdf_assert(0);
3108 					return 0;
3109 				}
3110 				frag = &skb_shinfo(skb)->frags[j];
3111 				skb_frag_len = skb_frag_size(frag);
3112 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3113 				tso_frag_vaddr = skb_frag_address_safe(frag);
3114 				j++;
3115 			}
3116 
3117 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3118 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3119 				tso_seg_size);
3120 
3121 			if (!(tso_frag_vaddr)) {
3122 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3123 						__func__);
3124 				return 0;
3125 			}
3126 
3127 			tso_frag_paddr =
3128 					 dma_map_single(osdev->dev,
3129 						 tso_frag_vaddr,
3130 						 tso_frag_len,
3131 						 DMA_TO_DEVICE);
3132 			if (unlikely(dma_mapping_error(osdev->dev,
3133 							tso_frag_paddr))) {
3134 				qdf_err("DMA mapping error!");
3135 				qdf_assert(0);
3136 				return 0;
3137 			}
3138 		}
3139 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3140 				curr_seg->seg.tso_flags.tcp_seq_num);
3141 		num_seg--;
3142 		/* if TCP FIN flag was set, set it in the last segment */
3143 		if (!num_seg)
3144 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3145 
3146 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3147 		curr_seg = curr_seg->next;
3148 	}
3149 	return tso_info->num_segs;
3150 }
3151 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3152 
3153 /**
3154  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3155  *
3156  * @osdev: qdf device handle
3157  * @tso_seg: TSO segment element to be unmapped
3158  * @is_last_seg: whether this is last tso seg or not
3159  *
3160  * Return: none
3161  */
3162 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3163 			  struct qdf_tso_seg_elem_t *tso_seg,
3164 			  bool is_last_seg)
3165 {
3166 	uint32_t num_frags = 0;
3167 
3168 	if (tso_seg->seg.num_frags > 0)
3169 		num_frags = tso_seg->seg.num_frags - 1;
3170 
3171 	/*Num of frags in a tso seg cannot be less than 2 */
3172 	if (num_frags < 1) {
3173 		/*
3174 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3175 		 * this may happen when qdf_nbuf_get_tso_info failed,
3176 		 * do dma unmap for the 0th frag in this seg.
3177 		 */
3178 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3179 			goto last_seg_free_first_frag;
3180 
3181 		qdf_assert(0);
3182 		qdf_err("ERROR: num of frags in a tso segment is %d",
3183 			(num_frags + 1));
3184 		return;
3185 	}
3186 
3187 	while (num_frags) {
3188 		/*Do dma unmap the tso seg except the 0th frag */
3189 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3190 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3191 				num_frags);
3192 			qdf_assert(0);
3193 			return;
3194 		}
3195 		dma_unmap_single(osdev->dev,
3196 				 tso_seg->seg.tso_frags[num_frags].paddr,
3197 				 tso_seg->seg.tso_frags[num_frags].length,
3198 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3199 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3200 		num_frags--;
3201 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3202 	}
3203 
3204 last_seg_free_first_frag:
3205 	if (is_last_seg) {
3206 		/*Do dma unmap for the tso seg 0th frag */
3207 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3208 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3209 			qdf_assert(0);
3210 			return;
3211 		}
3212 		dma_unmap_single(osdev->dev,
3213 				 tso_seg->seg.tso_frags[0].paddr,
3214 				 tso_seg->seg.tso_frags[0].length,
3215 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3216 		tso_seg->seg.tso_frags[0].paddr = 0;
3217 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3218 	}
3219 }
3220 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3221 
3222 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3223 {
3224 	size_t packet_len;
3225 
3226 	packet_len = skb->len -
3227 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3228 		 tcp_hdrlen(skb));
3229 
3230 	return packet_len;
3231 }
3232 
3233 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
3234 
3235 /**
3236  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3237  * into segments
3238  * @nbuf:   network buffer to be segmented
3239  * @tso_info:  This is the output. The information about the
3240  *      TSO segments will be populated within this.
3241  *
3242  * This function fragments a TCP jumbo packet into smaller
3243  * segments to be transmitted by the driver. It chains the TSO
3244  * segments created into a list.
3245  *
3246  * Return: 0 - success, 1 - failure
3247  */
3248 #ifndef BUILD_X86
3249 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3250 {
3251 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3252 	uint32_t remainder, num_segs = 0;
3253 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3254 	uint8_t frags_per_tso = 0;
3255 	uint32_t skb_frag_len = 0;
3256 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3257 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3258 	skb_frag_t *frag = NULL;
3259 	int j = 0;
3260 	uint32_t temp_num_seg = 0;
3261 
3262 	/* length of the first chunk of data in the skb minus eit header*/
3263 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3264 
3265 	/* Calculate num of segs for skb's first chunk of data*/
3266 	remainder = skb_frag_len % tso_seg_size;
3267 	num_segs = skb_frag_len / tso_seg_size;
3268 	/**
3269 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3270 	 * In that case, one more tso seg is required to accommodate
3271 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3272 	 * then remaining data will be accomodated while doing the calculation
3273 	 * for nr_frags data. Hence, frags_per_tso++.
3274 	 */
3275 	if (remainder) {
3276 		if (!skb_nr_frags)
3277 			num_segs++;
3278 		else
3279 			frags_per_tso++;
3280 	}
3281 
3282 	while (skb_nr_frags) {
3283 		if (j >= skb_shinfo(skb)->nr_frags) {
3284 			qdf_info("TSO: nr_frags %d j %d",
3285 				 skb_shinfo(skb)->nr_frags, j);
3286 			qdf_assert(0);
3287 			return 0;
3288 		}
3289 		/**
3290 		 * Calculate the number of tso seg for nr_frags data:
3291 		 * Get the length of each frag in skb_frag_len, add to
3292 		 * remainder.Get the number of segments by dividing it to
3293 		 * tso_seg_size and calculate the new remainder.
3294 		 * Decrement the nr_frags value and keep
3295 		 * looping all the skb_fragments.
3296 		 */
3297 		frag = &skb_shinfo(skb)->frags[j];
3298 		skb_frag_len = skb_frag_size(frag);
3299 		temp_num_seg = num_segs;
3300 		remainder += skb_frag_len;
3301 		num_segs += remainder / tso_seg_size;
3302 		remainder = remainder % tso_seg_size;
3303 		skb_nr_frags--;
3304 		if (remainder) {
3305 			if (num_segs > temp_num_seg)
3306 				frags_per_tso = 0;
3307 			/**
3308 			 * increment the tso per frags whenever remainder is
3309 			 * positive. If frags_per_tso reaches the (max-1),
3310 			 * [First frags always have EIT header, therefore max-1]
3311 			 * increment the num_segs as no more data can be
3312 			 * accomodated in the curr tso seg. Reset the remainder
3313 			 * and frags per tso and keep looping.
3314 			 */
3315 			frags_per_tso++;
3316 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3317 				num_segs++;
3318 				frags_per_tso = 0;
3319 				remainder = 0;
3320 			}
3321 			/**
3322 			 * If this is the last skb frag and still remainder is
3323 			 * non-zero(frags_per_tso is not reached to the max-1)
3324 			 * then increment the num_segs to take care of the
3325 			 * remaining length.
3326 			 */
3327 			if (!skb_nr_frags && remainder) {
3328 				num_segs++;
3329 				frags_per_tso = 0;
3330 			}
3331 		} else {
3332 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3333 			frags_per_tso = 0;
3334 		}
3335 		j++;
3336 	}
3337 
3338 	return num_segs;
3339 }
3340 #elif !defined(QCA_WIFI_QCN9000)
3341 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3342 {
3343 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3344 	skb_frag_t *frag = NULL;
3345 
3346 	/*
3347 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3348 	 * region which cannot be accessed by Target
3349 	 */
3350 	if (virt_to_phys(skb->data) < 0x50000040) {
3351 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3352 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3353 				virt_to_phys(skb->data));
3354 		goto fail;
3355 
3356 	}
3357 
3358 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3359 		frag = &skb_shinfo(skb)->frags[i];
3360 
3361 		if (!frag)
3362 			goto fail;
3363 
3364 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3365 			goto fail;
3366 	}
3367 
3368 
3369 	gso_size = skb_shinfo(skb)->gso_size;
3370 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3371 			+ tcp_hdrlen(skb));
3372 	while (tmp_len) {
3373 		num_segs++;
3374 		if (tmp_len > gso_size)
3375 			tmp_len -= gso_size;
3376 		else
3377 			break;
3378 	}
3379 
3380 	return num_segs;
3381 
3382 	/*
3383 	 * Do not free this frame, just do socket level accounting
3384 	 * so that this is not reused.
3385 	 */
3386 fail:
3387 	if (skb->sk)
3388 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3389 
3390 	return 0;
3391 }
3392 #else
3393 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3394 {
3395 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3396 	skb_frag_t *frag = NULL;
3397 
3398 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3399 		frag = &skb_shinfo(skb)->frags[i];
3400 
3401 		if (!frag)
3402 			goto fail;
3403 	}
3404 
3405 	gso_size = skb_shinfo(skb)->gso_size;
3406 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3407 			+ tcp_hdrlen(skb));
3408 	while (tmp_len) {
3409 		num_segs++;
3410 		if (tmp_len > gso_size)
3411 			tmp_len -= gso_size;
3412 		else
3413 			break;
3414 	}
3415 
3416 	return num_segs;
3417 
3418 	/*
3419 	 * Do not free this frame, just do socket level accounting
3420 	 * so that this is not reused.
3421 	 */
3422 fail:
3423 	if (skb->sk)
3424 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3425 
3426 	return 0;
3427 }
3428 #endif
3429 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3430 
3431 #endif /* FEATURE_TSO */
3432 
3433 /**
3434  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3435  *
3436  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3437  *
3438  * Return: N/A
3439  */
3440 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3441 			  uint32_t *lo, uint32_t *hi)
3442 {
3443 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3444 		*lo = lower_32_bits(dmaaddr);
3445 		*hi = upper_32_bits(dmaaddr);
3446 	} else {
3447 		*lo = dmaaddr;
3448 		*hi = 0;
3449 	}
3450 }
3451 
3452 qdf_export_symbol(__qdf_dmaaddr_to_32s);
3453 
3454 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3455 {
3456 	qdf_nbuf_users_inc(&skb->users);
3457 	return skb;
3458 }
3459 qdf_export_symbol(__qdf_nbuf_inc_users);
3460 
3461 int __qdf_nbuf_get_users(struct sk_buff *skb)
3462 {
3463 	return qdf_nbuf_users_read(&skb->users);
3464 }
3465 qdf_export_symbol(__qdf_nbuf_get_users);
3466 
3467 /**
3468  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3469  * @skb: sk_buff handle
3470  *
3471  * Return: none
3472  */
3473 
3474 void __qdf_nbuf_ref(struct sk_buff *skb)
3475 {
3476 	skb_get(skb);
3477 }
3478 qdf_export_symbol(__qdf_nbuf_ref);
3479 
3480 /**
3481  * __qdf_nbuf_shared() - Check whether the buffer is shared
3482  *  @skb: sk_buff buffer
3483  *
3484  *  Return: true if more than one person has a reference to this buffer.
3485  */
3486 int __qdf_nbuf_shared(struct sk_buff *skb)
3487 {
3488 	return skb_shared(skb);
3489 }
3490 qdf_export_symbol(__qdf_nbuf_shared);
3491 
3492 /**
3493  * __qdf_nbuf_dmamap_create() - create a DMA map.
3494  * @osdev: qdf device handle
3495  * @dmap: dma map handle
3496  *
3497  * This can later be used to map networking buffers. They :
3498  * - need space in adf_drv's software descriptor
3499  * - are typically created during adf_drv_create
3500  * - need to be created before any API(qdf_nbuf_map) that uses them
3501  *
3502  * Return: QDF STATUS
3503  */
3504 QDF_STATUS
3505 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3506 {
3507 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3508 	/*
3509 	 * driver can tell its SG capablity, it must be handled.
3510 	 * Bounce buffers if they are there
3511 	 */
3512 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3513 	if (!(*dmap))
3514 		error = QDF_STATUS_E_NOMEM;
3515 
3516 	return error;
3517 }
3518 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3519 /**
3520  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3521  * @osdev: qdf device handle
3522  * @dmap: dma map handle
3523  *
3524  * Return: none
3525  */
3526 void
3527 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3528 {
3529 	kfree(dmap);
3530 }
3531 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
3532 
3533 /**
3534  * __qdf_nbuf_map_nbytes_single() - map nbytes
3535  * @osdev: os device
3536  * @buf: buffer
3537  * @dir: direction
3538  * @nbytes: number of bytes
3539  *
3540  * Return: QDF_STATUS
3541  */
3542 #ifdef A_SIMOS_DEVHOST
3543 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3544 		qdf_device_t osdev, struct sk_buff *buf,
3545 		 qdf_dma_dir_t dir, int nbytes)
3546 {
3547 	qdf_dma_addr_t paddr;
3548 
3549 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
3550 	return QDF_STATUS_SUCCESS;
3551 }
3552 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3553 #else
3554 QDF_STATUS __qdf_nbuf_map_nbytes_single(
3555 		qdf_device_t osdev, struct sk_buff *buf,
3556 		 qdf_dma_dir_t dir, int nbytes)
3557 {
3558 	qdf_dma_addr_t paddr;
3559 
3560 	/* assume that the OS only provides a single fragment */
3561 	QDF_NBUF_CB_PADDR(buf) = paddr =
3562 		dma_map_single(osdev->dev, buf->data,
3563 			nbytes, __qdf_dma_dir_to_os(dir));
3564 	return dma_mapping_error(osdev->dev, paddr) ?
3565 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3566 }
3567 qdf_export_symbol(__qdf_nbuf_map_nbytes_single);
3568 #endif
3569 /**
3570  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
3571  * @osdev: os device
3572  * @buf: buffer
3573  * @dir: direction
3574  * @nbytes: number of bytes
3575  *
3576  * Return: none
3577  */
3578 #if defined(A_SIMOS_DEVHOST)
3579 void
3580 __qdf_nbuf_unmap_nbytes_single(
3581 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3582 {
3583 }
3584 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3585 
3586 #else
3587 void
3588 __qdf_nbuf_unmap_nbytes_single(
3589 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
3590 {
3591 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3592 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3593 		return;
3594 	}
3595 	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3596 			nbytes, __qdf_dma_dir_to_os(dir));
3597 }
3598 qdf_export_symbol(__qdf_nbuf_unmap_nbytes_single);
3599 #endif
3600 /**
3601  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3602  * @osdev: os device
3603  * @skb: skb handle
3604  * @dir: dma direction
3605  * @nbytes: number of bytes to be mapped
3606  *
3607  * Return: QDF_STATUS
3608  */
3609 #ifdef QDF_OS_DEBUG
3610 QDF_STATUS
3611 __qdf_nbuf_map_nbytes(
3612 	qdf_device_t osdev,
3613 	struct sk_buff *skb,
3614 	qdf_dma_dir_t dir,
3615 	int nbytes)
3616 {
3617 	struct skb_shared_info  *sh = skb_shinfo(skb);
3618 
3619 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3620 
3621 	/*
3622 	 * Assume there's only a single fragment.
3623 	 * To support multiple fragments, it would be necessary to change
3624 	 * adf_nbuf_t to be a separate object that stores meta-info
3625 	 * (including the bus address for each fragment) and a pointer
3626 	 * to the underlying sk_buff.
3627 	 */
3628 	qdf_assert(sh->nr_frags == 0);
3629 
3630 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3631 }
3632 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3633 #else
3634 QDF_STATUS
3635 __qdf_nbuf_map_nbytes(
3636 	qdf_device_t osdev,
3637 	struct sk_buff *skb,
3638 	qdf_dma_dir_t dir,
3639 	int nbytes)
3640 {
3641 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3642 }
3643 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3644 #endif
3645 /**
3646  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
3647  * @osdev: OS device
3648  * @skb: skb handle
3649  * @dir: direction
3650  * @nbytes: number of bytes
3651  *
3652  * Return: none
3653  */
3654 void
3655 __qdf_nbuf_unmap_nbytes(
3656 	qdf_device_t osdev,
3657 	struct sk_buff *skb,
3658 	qdf_dma_dir_t dir,
3659 	int nbytes)
3660 {
3661 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3662 
3663 	/*
3664 	 * Assume there's a single fragment.
3665 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3666 	 */
3667 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3668 }
3669 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
3670 
3671 /**
3672  * __qdf_nbuf_dma_map_info() - return the dma map info
3673  * @bmap: dma map
3674  * @sg: dma map info
3675  *
3676  * Return: none
3677  */
3678 void
3679 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3680 {
3681 	qdf_assert(bmap->mapped);
3682 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3683 
3684 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3685 			sizeof(struct __qdf_segment));
3686 	sg->nsegs = bmap->nsegs;
3687 }
3688 qdf_export_symbol(__qdf_nbuf_dma_map_info);
3689 /**
3690  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
3691  *			specified by the index
3692  * @skb: sk buff
3693  * @sg: scatter/gather list of all the frags
3694  *
3695  * Return: none
3696  */
3697 #if defined(__QDF_SUPPORT_FRAG_MEM)
3698 void
3699 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3700 {
3701 	qdf_assert(skb);
3702 	sg->sg_segs[0].vaddr = skb->data;
3703 	sg->sg_segs[0].len   = skb->len;
3704 	sg->nsegs            = 1;
3705 
3706 	for (int i = 1; i <= sh->nr_frags; i++) {
3707 		skb_frag_t    *f        = &sh->frags[i - 1];
3708 
3709 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3710 			f->page_offset);
3711 		sg->sg_segs[i].len      = f->size;
3712 
3713 		qdf_assert(i < QDF_MAX_SGLIST);
3714 	}
3715 	sg->nsegs += i;
3716 
3717 }
3718 qdf_export_symbol(__qdf_nbuf_frag_info);
3719 #else
3720 #ifdef QDF_OS_DEBUG
3721 void
3722 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3723 {
3724 
3725 	struct skb_shared_info  *sh = skb_shinfo(skb);
3726 
3727 	qdf_assert(skb);
3728 	sg->sg_segs[0].vaddr = skb->data;
3729 	sg->sg_segs[0].len   = skb->len;
3730 	sg->nsegs            = 1;
3731 
3732 	qdf_assert(sh->nr_frags == 0);
3733 }
3734 qdf_export_symbol(__qdf_nbuf_frag_info);
3735 #else
3736 void
3737 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3738 {
3739 	sg->sg_segs[0].vaddr = skb->data;
3740 	sg->sg_segs[0].len   = skb->len;
3741 	sg->nsegs            = 1;
3742 }
3743 qdf_export_symbol(__qdf_nbuf_frag_info);
3744 #endif
3745 #endif
3746 /**
3747  * __qdf_nbuf_get_frag_size() - get frag size
3748  * @nbuf: sk buffer
3749  * @cur_frag: current frag
3750  *
3751  * Return: frag size
3752  */
3753 uint32_t
3754 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3755 {
3756 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3757 	const skb_frag_t *frag = sh->frags + cur_frag;
3758 
3759 	return skb_frag_size(frag);
3760 }
3761 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3762 
3763 /**
3764  * __qdf_nbuf_frag_map() - dma map frag
3765  * @osdev: os device
3766  * @nbuf: sk buff
3767  * @offset: offset
3768  * @dir: direction
3769  * @cur_frag: current fragment
3770  *
3771  * Return: QDF status
3772  */
3773 #ifdef A_SIMOS_DEVHOST
3774 QDF_STATUS __qdf_nbuf_frag_map(
3775 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3776 	int offset, qdf_dma_dir_t dir, int cur_frag)
3777 {
3778 	int32_t paddr, frag_len;
3779 
3780 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
3781 	return QDF_STATUS_SUCCESS;
3782 }
3783 qdf_export_symbol(__qdf_nbuf_frag_map);
3784 #else
3785 QDF_STATUS __qdf_nbuf_frag_map(
3786 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3787 	int offset, qdf_dma_dir_t dir, int cur_frag)
3788 {
3789 	dma_addr_t paddr, frag_len;
3790 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3791 	const skb_frag_t *frag = sh->frags + cur_frag;
3792 
3793 	frag_len = skb_frag_size(frag);
3794 
3795 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3796 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3797 					__qdf_dma_dir_to_os(dir));
3798 	return dma_mapping_error(osdev->dev, paddr) ?
3799 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3800 }
3801 qdf_export_symbol(__qdf_nbuf_frag_map);
3802 #endif
3803 /**
3804  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3805  * @dmap: dma map
3806  * @cb: callback
3807  * @arg: argument
3808  *
3809  * Return: none
3810  */
3811 void
3812 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3813 {
3814 	return;
3815 }
3816 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3817 
3818 
3819 /**
3820  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3821  * @osdev: os device
3822  * @buf: sk buff
3823  * @dir: direction
3824  *
3825  * Return: none
3826  */
3827 #if defined(A_SIMOS_DEVHOST)
3828 static void __qdf_nbuf_sync_single_for_cpu(
3829 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3830 {
3831 	return;
3832 }
3833 #else
3834 static void __qdf_nbuf_sync_single_for_cpu(
3835 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3836 {
3837 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
3838 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3839 		return;
3840 	}
3841 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3842 		skb_end_offset(buf) - skb_headroom(buf),
3843 		__qdf_dma_dir_to_os(dir));
3844 }
3845 #endif
3846 /**
3847  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3848  * @osdev: os device
3849  * @skb: sk buff
3850  * @dir: direction
3851  *
3852  * Return: none
3853  */
3854 void
3855 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3856 	struct sk_buff *skb, qdf_dma_dir_t dir)
3857 {
3858 	qdf_assert(
3859 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3860 
3861 	/*
3862 	 * Assume there's a single fragment.
3863 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3864 	 */
3865 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3866 }
3867 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
3868 
3869 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
3870 /**
3871  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
3872  * @rx_status: Pointer to rx_status.
3873  * @rtap_buf: Buf to which VHT info has to be updated.
3874  * @rtap_len: Current length of radiotap buffer
3875  *
3876  * Return: Length of radiotap after VHT flags updated.
3877  */
3878 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
3879 					struct mon_rx_status *rx_status,
3880 					int8_t *rtap_buf,
3881 					uint32_t rtap_len)
3882 {
3883 	uint16_t vht_flags = 0;
3884 
3885 	rtap_len = qdf_align(rtap_len, 2);
3886 
3887 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
3888 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
3889 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
3890 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
3891 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
3892 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
3893 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
3894 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
3895 	rtap_len += 2;
3896 
3897 	rtap_buf[rtap_len] |=
3898 		(rx_status->is_stbc ?
3899 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
3900 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
3901 		(rx_status->ldpc ?
3902 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
3903 		(rx_status->beamformed ?
3904 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
3905 	rtap_len += 1;
3906 	switch (rx_status->vht_flag_values2) {
3907 	case IEEE80211_RADIOTAP_VHT_BW_20:
3908 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
3909 		break;
3910 	case IEEE80211_RADIOTAP_VHT_BW_40:
3911 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
3912 		break;
3913 	case IEEE80211_RADIOTAP_VHT_BW_80:
3914 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
3915 		break;
3916 	case IEEE80211_RADIOTAP_VHT_BW_160:
3917 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
3918 		break;
3919 	}
3920 	rtap_len += 1;
3921 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
3922 	rtap_len += 1;
3923 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
3924 	rtap_len += 1;
3925 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
3926 	rtap_len += 1;
3927 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
3928 	rtap_len += 1;
3929 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
3930 	rtap_len += 1;
3931 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
3932 	rtap_len += 1;
3933 	put_unaligned_le16(rx_status->vht_flag_values6,
3934 			   &rtap_buf[rtap_len]);
3935 	rtap_len += 2;
3936 
3937 	return rtap_len;
3938 }
3939 
3940 /**
3941  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
3942  * @rx_status: Pointer to rx_status.
3943  * @rtap_buf: buffer to which radiotap has to be updated
3944  * @rtap_len: radiotap length
3945  *
3946  * API update high-efficiency (11ax) fields in the radiotap header
3947  *
3948  * Return: length of rtap_len updated.
3949  */
3950 static unsigned int
3951 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
3952 				     int8_t *rtap_buf, uint32_t rtap_len)
3953 {
3954 	/*
3955 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
3956 	 * Enable all "known" HE radiotap flags for now
3957 	 */
3958 	rtap_len = qdf_align(rtap_len, 2);
3959 
3960 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
3961 	rtap_len += 2;
3962 
3963 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
3964 	rtap_len += 2;
3965 
3966 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
3967 	rtap_len += 2;
3968 
3969 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
3970 	rtap_len += 2;
3971 
3972 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
3973 	rtap_len += 2;
3974 
3975 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
3976 	rtap_len += 2;
3977 	qdf_debug("he data %x %x %x %x %x %x",
3978 		  rx_status->he_data1,
3979 		  rx_status->he_data2, rx_status->he_data3,
3980 		  rx_status->he_data4, rx_status->he_data5,
3981 		  rx_status->he_data6);
3982 	return rtap_len;
3983 }
3984 
3985 
3986 /**
3987  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
3988  * @rx_status: Pointer to rx_status.
3989  * @rtap_buf: buffer to which radiotap has to be updated
3990  * @rtap_len: radiotap length
3991  *
3992  * API update HE-MU fields in the radiotap header
3993  *
3994  * Return: length of rtap_len updated.
3995  */
3996 static unsigned int
3997 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
3998 				     int8_t *rtap_buf, uint32_t rtap_len)
3999 {
4000 	rtap_len = qdf_align(rtap_len, 2);
4001 
4002 	/*
4003 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4004 	 * Enable all "known" he-mu radiotap flags for now
4005 	 */
4006 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4007 	rtap_len += 2;
4008 
4009 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4010 	rtap_len += 2;
4011 
4012 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4013 	rtap_len += 1;
4014 
4015 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4016 	rtap_len += 1;
4017 
4018 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4019 	rtap_len += 1;
4020 
4021 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4022 	rtap_len += 1;
4023 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4024 		  rx_status->he_flags1,
4025 		  rx_status->he_flags2, rx_status->he_RU[0],
4026 		  rx_status->he_RU[1], rx_status->he_RU[2],
4027 		  rx_status->he_RU[3]);
4028 
4029 	return rtap_len;
4030 }
4031 
4032 /**
4033  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4034  * @rx_status: Pointer to rx_status.
4035  * @rtap_buf: buffer to which radiotap has to be updated
4036  * @rtap_len: radiotap length
4037  *
4038  * API update he-mu-other fields in the radiotap header
4039  *
4040  * Return: length of rtap_len updated.
4041  */
4042 static unsigned int
4043 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4044 				     int8_t *rtap_buf, uint32_t rtap_len)
4045 {
4046 	rtap_len = qdf_align(rtap_len, 2);
4047 
4048 	/*
4049 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4050 	 * Enable all "known" he-mu-other radiotap flags for now
4051 	 */
4052 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4053 	rtap_len += 2;
4054 
4055 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4056 	rtap_len += 2;
4057 
4058 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4059 	rtap_len += 1;
4060 
4061 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4062 	rtap_len += 1;
4063 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4064 		  rx_status->he_per_user_1,
4065 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4066 		  rx_status->he_per_user_known);
4067 	return rtap_len;
4068 }
4069 
4070 
4071 /**
4072  * This is the length for radiotap, combined length
4073  * (Mandatory part struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
4074  * cannot be more than available headroom_sz.
4075  * increase this when we add more radiotap elements.
4076  * Number after '+' indicates maximum possible increase due to alignment
4077  */
4078 
4079 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4080 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4081 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4082 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4083 #define RADIOTAP_FIXED_HEADER_LEN 17
4084 #define RADIOTAP_HT_FLAGS_LEN 3
4085 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4086 #define RADIOTAP_VENDOR_NS_LEN \
4087 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4088 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4089 				RADIOTAP_FIXED_HEADER_LEN + \
4090 				RADIOTAP_HT_FLAGS_LEN + \
4091 				RADIOTAP_VHT_FLAGS_LEN + \
4092 				RADIOTAP_AMPDU_STATUS_LEN + \
4093 				RADIOTAP_HE_FLAGS_LEN + \
4094 				RADIOTAP_HE_MU_FLAGS_LEN + \
4095 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4096 				RADIOTAP_VENDOR_NS_LEN)
4097 
4098 #define IEEE80211_RADIOTAP_HE 23
4099 #define IEEE80211_RADIOTAP_HE_MU	24
4100 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4101 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4102 
4103 /**
4104  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4105  * @rx_status: Pointer to rx_status.
4106  * @rtap_buf: Buf to which AMPDU info has to be updated.
4107  * @rtap_len: Current length of radiotap buffer
4108  *
4109  * Return: Length of radiotap after AMPDU flags updated.
4110  */
4111 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4112 					struct mon_rx_status *rx_status,
4113 					uint8_t *rtap_buf,
4114 					uint32_t rtap_len)
4115 {
4116 	/*
4117 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4118 	 * First 32 bits of AMPDU represents the reference number
4119 	 */
4120 
4121 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4122 	uint16_t ampdu_flags = 0;
4123 	uint16_t ampdu_reserved_flags = 0;
4124 
4125 	rtap_len = qdf_align(rtap_len, 4);
4126 
4127 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4128 	rtap_len += 4;
4129 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4130 	rtap_len += 2;
4131 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4132 	rtap_len += 2;
4133 
4134 	return rtap_len;
4135 }
4136 
4137 /**
4138  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4139  * @rx_status: Pointer to rx_status.
4140  * @nbuf:      nbuf pointer to which radiotap has to be updated
4141  * @headroom_sz: Available headroom size.
4142  *
4143  * Return: length of rtap_len updated.
4144  */
4145 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4146 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4147 {
4148 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4149 	struct ieee80211_radiotap_header *rthdr =
4150 		(struct ieee80211_radiotap_header *)rtap_buf;
4151 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4152 	uint32_t rtap_len = rtap_hdr_len;
4153 	uint8_t length = rtap_len;
4154 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4155 
4156 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4157 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4158 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4159 	rtap_len += 8;
4160 
4161 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4162 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4163 
4164 	if (rx_status->rs_fcs_err)
4165 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4166 
4167 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4168 	rtap_len += 1;
4169 
4170 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4171 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4172 	    !rx_status->he_flags) {
4173 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4174 		rtap_buf[rtap_len] = rx_status->rate;
4175 	} else
4176 		rtap_buf[rtap_len] = 0;
4177 	rtap_len += 1;
4178 
4179 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4180 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4181 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4182 	rtap_len += 2;
4183 	/* Channel flags. */
4184 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
4185 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4186 	else
4187 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4188 	if (rx_status->cck_flag)
4189 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4190 	if (rx_status->ofdm_flag)
4191 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4192 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4193 	rtap_len += 2;
4194 
4195 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4196 	 *					(dBm)
4197 	 */
4198 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4199 	/*
4200 	 * rssi_comb is int dB, need to convert it to dBm.
4201 	 * normalize value to noise floor of -96 dBm
4202 	 */
4203 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4204 	rtap_len += 1;
4205 
4206 	/* RX signal noise floor */
4207 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4208 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4209 	rtap_len += 1;
4210 
4211 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4212 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4213 	rtap_buf[rtap_len] = rx_status->nr_ant;
4214 	rtap_len += 1;
4215 
4216 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4217 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4218 		return 0;
4219 	}
4220 
4221 	if (rx_status->ht_flags) {
4222 		length = rtap_len;
4223 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
4224 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4225 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4226 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4227 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4228 		rtap_len += 1;
4229 
4230 		if (rx_status->sgi)
4231 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4232 		if (rx_status->bw)
4233 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4234 		else
4235 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4236 		rtap_len += 1;
4237 
4238 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4239 		rtap_len += 1;
4240 
4241 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4242 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4243 			return 0;
4244 		}
4245 	}
4246 
4247 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4248 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4249 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4250 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4251 								rtap_buf,
4252 								rtap_len);
4253 	}
4254 
4255 	if (rx_status->vht_flags) {
4256 		length = rtap_len;
4257 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4258 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4259 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4260 								rtap_buf,
4261 								rtap_len);
4262 
4263 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4264 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4265 			return 0;
4266 		}
4267 	}
4268 
4269 	if (rx_status->he_flags) {
4270 		length = rtap_len;
4271 		/* IEEE80211_RADIOTAP_HE */
4272 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4273 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4274 								rtap_buf,
4275 								rtap_len);
4276 
4277 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4278 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4279 			return 0;
4280 		}
4281 	}
4282 
4283 	if (rx_status->he_mu_flags) {
4284 		length = rtap_len;
4285 		/* IEEE80211_RADIOTAP_HE-MU */
4286 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4287 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4288 								rtap_buf,
4289 								rtap_len);
4290 
4291 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4292 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4293 			return 0;
4294 		}
4295 	}
4296 
4297 	if (rx_status->he_mu_other_flags) {
4298 		length = rtap_len;
4299 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4300 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4301 		rtap_len =
4302 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4303 								rtap_buf,
4304 								rtap_len);
4305 
4306 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4307 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4308 			return 0;
4309 		}
4310 	}
4311 
4312 	rtap_len = qdf_align(rtap_len, 2);
4313 	/*
4314 	 * Radiotap Vendor Namespace
4315 	 */
4316 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4317 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4318 					(rtap_buf + rtap_len);
4319 	/*
4320 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4321 	 */
4322 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4323 	/*
4324 	 * Name space selector = 0
4325 	 * We only will have one namespace for now
4326 	 */
4327 	radiotap_vendor_ns_ath->hdr.selector = 0;
4328 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4329 					sizeof(*radiotap_vendor_ns_ath) -
4330 					sizeof(radiotap_vendor_ns_ath->hdr));
4331 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4332 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4333 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4334 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4335 				cpu_to_le32(rx_status->ppdu_timestamp);
4336 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4337 
4338 	rthdr->it_len = cpu_to_le16(rtap_len);
4339 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4340 
4341 	if (headroom_sz < rtap_len) {
4342 		qdf_err("ERROR: not enough space to update radiotap");
4343 		return 0;
4344 	}
4345 	qdf_nbuf_push_head(nbuf, rtap_len);
4346 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4347 	return rtap_len;
4348 }
4349 #else
4350 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4351 					struct mon_rx_status *rx_status,
4352 					int8_t *rtap_buf,
4353 					uint32_t rtap_len)
4354 {
4355 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4356 	return 0;
4357 }
4358 
4359 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4360 				      int8_t *rtap_buf, uint32_t rtap_len)
4361 {
4362 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4363 	return 0;
4364 }
4365 
4366 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4367 					struct mon_rx_status *rx_status,
4368 					uint8_t *rtap_buf,
4369 					uint32_t rtap_len)
4370 {
4371 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4372 	return 0;
4373 }
4374 
4375 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4376 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4377 {
4378 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4379 	return 0;
4380 }
4381 #endif
4382 qdf_export_symbol(qdf_nbuf_update_radiotap);
4383 
4384 /**
4385  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4386  * @cb_func_ptr: function pointer to the nbuf free callback
4387  *
4388  * This function registers a callback function for nbuf free.
4389  *
4390  * Return: none
4391  */
4392 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4393 {
4394 	nbuf_free_cb = cb_func_ptr;
4395 }
4396 
4397 /**
4398  * qdf_nbuf_classify_pkt() - classify packet
4399  * @skb - sk buff
4400  *
4401  * Return: none
4402  */
4403 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4404 {
4405 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4406 
4407 	/* check destination mac address is broadcast/multicast */
4408 	if (is_broadcast_ether_addr((uint8_t *)eh))
4409 		QDF_NBUF_CB_SET_BCAST(skb);
4410 	else if (is_multicast_ether_addr((uint8_t *)eh))
4411 		QDF_NBUF_CB_SET_MCAST(skb);
4412 
4413 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4414 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4415 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4416 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4417 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4418 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4419 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4420 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4421 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4422 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4423 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4424 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4425 }
4426 qdf_export_symbol(qdf_nbuf_classify_pkt);
4427 
4428 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4429 {
4430 	qdf_nbuf_users_set(&nbuf->users, 1);
4431 	nbuf->data = nbuf->head + NET_SKB_PAD;
4432 	skb_reset_tail_pointer(nbuf);
4433 }
4434 qdf_export_symbol(__qdf_nbuf_init);
4435 
4436 #ifdef WLAN_FEATURE_FASTPATH
4437 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4438 {
4439 	qdf_nbuf_users_set(&nbuf->users, 1);
4440 	nbuf->data = nbuf->head + NET_SKB_PAD;
4441 	skb_reset_tail_pointer(nbuf);
4442 }
4443 qdf_export_symbol(qdf_nbuf_init_fast);
4444 #endif /* WLAN_FEATURE_FASTPATH */
4445 
4446 
4447 #ifdef QDF_NBUF_GLOBAL_COUNT
4448 /**
4449  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
4450  *
4451  * Return void
4452  */
4453 void __qdf_nbuf_mod_init(void)
4454 {
4455 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
4456 	qdf_atomic_init(&nbuf_count);
4457 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4458 }
4459 
4460 /**
4461  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
4462  *
4463  * Return void
4464  */
4465 void __qdf_nbuf_mod_exit(void)
4466 {
4467 }
4468 #endif
4469