xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework (QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
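/*
 * Illustrative note (not driver code): the qdf_nbuf_users_* wrappers above
 * exist because struct sk_buff's 'users' field changed from atomic_t to
 * refcount_t in kernel 4.13, so callers can stay version-agnostic. A minimal
 * usage sketch, assuming 'skb' is a valid struct sk_buff pointer:
 *
 *	qdf_nbuf_users_inc(&skb->users);            // take an extra reference
 *	if (qdf_nbuf_users_read(&skb->users) > 1)
 *		pr_debug("skb is shared\n");
 *	qdf_nbuf_users_dec(&skb->users);            // drop the extra reference
 */
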
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define CHANNEL_FREQ_5150 5150
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
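/*
 * Illustrative note (not driver code): the CHANNEL_* and FREQ_* constants
 * above implement the usual channel-number to centre-frequency mapping.
 * A worked example of the arithmetic, assuming conventional IEEE 802.11
 * channel numbering:
 *
 *	2.4 GHz: freq = 2407 + chan * 5       e.g. chan 6  -> 2437 MHz
 *	         chan 14 is the special case  ->  2484 MHz
 *	5 GHz:   freq = 5000 + chan * 5       e.g. chan 36 -> 5180 MHz
 *
 * The RADIOTAP_*_SPECTRUM_CHANNEL, RADIOTAP_CCK_CHANNEL and
 * RADIOTAP_OFDM_CHANNEL values are the radiotap channel flag bits reported
 * alongside the computed frequency.
 */
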
95 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 /* Packet Counter */
110 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
111 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
112 #ifdef QDF_NBUF_GLOBAL_COUNT
113 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
114 static qdf_atomic_t nbuf_count;
115 #endif
116 
117 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
118 static bool is_initial_mem_debug_disabled;
119 #endif
120 
121 /**
122  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
123  *
124  * Return: none
125  */
126 void qdf_nbuf_tx_desc_count_display(void)
127 {
128 	qdf_debug("Current Snapshot of the Driver:");
129 	qdf_debug("Data Packets:");
130 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
131 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
132 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
133 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
134 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
135 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
136 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
137 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
138 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
139 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
140 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
141 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
142 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
143 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
144 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
145 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
146 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
147 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
148 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
149 	qdf_debug("Mgmt Packets:");
150 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
151 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
152 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
153 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
154 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
155 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
156 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
157 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
158 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
159 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
160 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
161 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
162 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
163 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
164 }
165 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
166 
167 /**
168  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
169  * @packet_type: packet type, either mgmt or data
170  * @current_state: layer at which the packet is currently present
171  *
172  * Return: none
173  */
174 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
175 			uint8_t current_state)
176 {
177 	switch (packet_type) {
178 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
179 		nbuf_tx_mgmt[current_state]++;
180 		break;
181 	case QDF_NBUF_TX_PKT_DATA_TRACK:
182 		nbuf_tx_data[current_state]++;
183 		break;
184 	default:
185 		break;
186 	}
187 }
188 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
189 
190 /**
191  * qdf_nbuf_tx_desc_count_clear() - Clears packet counters for both data and mgmt
192  *
193  * Return: none
194  */
195 void qdf_nbuf_tx_desc_count_clear(void)
196 {
197 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
198 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
199 }
200 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
201 
202 /**
203  * qdf_nbuf_set_state() - Updates the packet state
204  * @nbuf:            network buffer
205  * @current_state: layer at which the packet currently is
206  *
207  * This function updates the packet state to the layer at which the packet
208  * currently is
209  *
210  * Return: none
211  */
212 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
213 {
214 	/*
215 	 * Only Mgmt and Data packets are tracked. WMI messages
216 	 * such as scan commands are not tracked.
217 	 */
218 	uint8_t packet_type;
219 
220 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
221 
222 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
223 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
224 		return;
225 	}
226 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
227 	qdf_nbuf_tx_desc_count_update(packet_type,
228 					current_state);
229 }
230 qdf_export_symbol(qdf_nbuf_set_state);
231 
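/*
 * Illustrative note (not driver code): a layer that hands a tracked TX frame
 * to the next stage records that transition with qdf_nbuf_set_state(). A
 * minimal sketch, assuming 'nbuf' is a data frame whose packet-track field
 * was already set to QDF_NBUF_TX_PKT_DATA_TRACK:
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);   // entering HTC
 *	...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HIF);   // handed to HIF
 *
 * Each call bumps the per-layer counter that
 * qdf_nbuf_tx_desc_count_display() prints.
 */
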
232 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
233 /**
234  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
235  *
236  * This function starts the alloc fail replenish timer.
237  *
238  * Return: void
239  */
240 static void __qdf_nbuf_start_replenish_timer(void)
241 {
242 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
243 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
244 	    QDF_TIMER_STATE_RUNNING)
245 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
246 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
247 }
248 
249 /**
250  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
251  *
252  * This function stops the alloc fail replenish timer.
253  *
254  * Return: void
255  */
256 static void __qdf_nbuf_stop_replenish_timer(void)
257 {
258 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
259 		return;
260 
261 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
262 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
263 	    QDF_TIMER_STATE_RUNNING)
264 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
265 }
266 
267 /**
268  * qdf_replenish_expire_handler() - Replenish expire handler
269  *
270  * This function triggers when the alloc fail replenish timer expires.
271  *
272  * Return: void
273  */
274 static void qdf_replenish_expire_handler(void *arg)
275 {
276 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
277 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
278 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
279 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
280 
281 		/* Error handling here */
282 	}
283 }
284 
285 /**
286  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
287  *
288  * This function initializes the nbuf alloc fail replenish timer.
289  *
290  * Return: void
291  */
292 void __qdf_nbuf_init_replenish_timer(void)
293 {
294 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
295 			  qdf_replenish_expire_handler, NULL);
296 }
297 
298 /**
299  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
300  *
301  * This function deinitializes the nbuf alloc fail replenish timer.
302  *
303  * Return: void
304  */
305 void __qdf_nbuf_deinit_replenish_timer(void)
306 {
307 	__qdf_nbuf_stop_replenish_timer();
308 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
309 }
310 #else
311 
312 static inline void __qdf_nbuf_start_replenish_timer(void) {}
313 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
314 #endif
315 
316 /* globals do not need to be initialized to NULL/0 */
317 qdf_nbuf_trace_update_t qdf_trace_update_cb;
318 qdf_nbuf_free_t nbuf_free_cb;
319 
320 #ifdef QDF_NBUF_GLOBAL_COUNT
321 
322 /**
323  * __qdf_nbuf_count_get() - get nbuf global count
324  *
325  * Return: nbuf global count
326  */
327 int __qdf_nbuf_count_get(void)
328 {
329 	return qdf_atomic_read(&nbuf_count);
330 }
331 qdf_export_symbol(__qdf_nbuf_count_get);
332 
333 /**
334  * __qdf_nbuf_count_inc() - increment nbuf global count
335  *
336  * @nbuf: sk buff
337  *
338  * Return: void
339  */
340 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
341 {
342 	int num_nbuf = 1;
343 	qdf_nbuf_t ext_list;
344 
345 	if (qdf_likely(is_initial_mem_debug_disabled))
346 		return;
347 
348 	ext_list = qdf_nbuf_get_ext_list(nbuf);
349 
350 	/* Take care to account for frag_list */
351 	while (ext_list) {
352 		++num_nbuf;
353 		ext_list = qdf_nbuf_queue_next(ext_list);
354 	}
355 
356 	qdf_atomic_add(num_nbuf, &nbuf_count);
357 }
358 qdf_export_symbol(__qdf_nbuf_count_inc);
359 
360 /**
361  * __qdf_nbuf_count_dec() - decrement nbuf global count
362  *
363  * @nbuf: sk buff
364  *
365  * Return: void
366  */
367 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
368 {
369 	qdf_nbuf_t ext_list;
370 	int num_nbuf;
371 
372 	if (qdf_likely(is_initial_mem_debug_disabled))
373 		return;
374 
375 	if (qdf_nbuf_get_users(nbuf) > 1)
376 		return;
377 
378 	num_nbuf = 1;
379 
380 	/* Take care to account for frag_list */
381 	ext_list = qdf_nbuf_get_ext_list(nbuf);
382 	while (ext_list) {
383 		if (qdf_nbuf_get_users(ext_list) == 1)
384 			++num_nbuf;
385 		ext_list = qdf_nbuf_queue_next(ext_list);
386 	}
387 
388 	qdf_atomic_sub(num_nbuf, &nbuf_count);
389 }
390 qdf_export_symbol(__qdf_nbuf_count_dec);
391 #endif
392 
393 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
394 	!defined(QCA_WIFI_QCN9000)
395 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
396 				 int align, int prio, const char *func,
397 				 uint32_t line)
398 {
399 	struct sk_buff *skb;
400 	unsigned long offset;
401 	uint32_t lowmem_alloc_tries = 0;
402 
403 	if (align)
404 		size += (align - 1);
405 
406 realloc:
407 	skb = dev_alloc_skb(size);
408 
409 	if (skb)
410 		goto skb_alloc;
411 
412 	skb = pld_nbuf_pre_alloc(size);
413 
414 	if (!skb) {
415 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
416 				size, func, line);
417 		return NULL;
418 	}
419 
420 skb_alloc:
421 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040.
422 	 * Though we are trying to reserve low memory upfront to prevent this,
423 	 * we sometimes see SKBs allocated from low memory.
424 	 */
425 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
426 		lowmem_alloc_tries++;
427 		if (lowmem_alloc_tries > 100) {
428 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
429 				     size, func, line);
430 			return NULL;
431 		} else {
432 			/* Not freeing to make sure it
433 			 * will not get allocated again
434 			 */
435 			goto realloc;
436 		}
437 	}
438 	memset(skb->cb, 0x0, sizeof(skb->cb));
439 
440 	/*
441 	 * The default is for netbuf fragments to be interpreted
442 	 * as wordstreams rather than bytestreams.
443 	 */
444 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
445 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
446 
447 	/*
448 	 * XXX:how about we reserve first then align
449 	 * Align & make sure that the tail & data are adjusted properly
450 	 */
451 
452 	if (align) {
453 		offset = ((unsigned long)skb->data) % align;
454 		if (offset)
455 			skb_reserve(skb, align - offset);
456 	}
457 
458 	/*
459 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
460 	 * pointer
461 	 */
462 	skb_reserve(skb, reserve);
463 	qdf_nbuf_count_inc(skb);
464 
465 	return skb;
466 }
467 #else
468 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
469 				 int align, int prio, const char *func,
470 				 uint32_t line)
471 {
472 	struct sk_buff *skb;
473 	unsigned long offset;
474 	int flags = GFP_KERNEL;
475 
476 	if (align)
477 		size += (align - 1);
478 
479 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
480 		flags = GFP_ATOMIC;
481 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
482 		/*
483 		 * Observed that kcompactd burns out CPU to make order-3 page.
484 		 * __netdev_alloc_skb has a 4k page fallback option in case the
485 		 * high-order page allocation fails, so we don't need to push
486 		 * hard here. Let kcompactd rest in peace.
487 		 */
488 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
489 #endif
490 	}
491 
492 	skb = __netdev_alloc_skb(NULL, size, flags);
493 
494 	if (skb)
495 		goto skb_alloc;
496 
497 	skb = pld_nbuf_pre_alloc(size);
498 
499 	if (!skb) {
500 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
501 				size, func, line);
502 		__qdf_nbuf_start_replenish_timer();
503 		return NULL;
504 	} else {
505 		__qdf_nbuf_stop_replenish_timer();
506 	}
507 
508 skb_alloc:
509 	memset(skb->cb, 0x0, sizeof(skb->cb));
510 
511 	/*
512 	 * The default is for netbuf fragments to be interpreted
513 	 * as wordstreams rather than bytestreams.
514 	 */
515 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
516 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
517 
518 	/*
519 	 * XXX:how about we reserve first then align
520 	 * Align & make sure that the tail & data are adjusted properly
521 	 */
522 
523 	if (align) {
524 		offset = ((unsigned long)skb->data) % align;
525 		if (offset)
526 			skb_reserve(skb, align - offset);
527 	}
528 
529 	/*
530 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
531 	 * pointer
532 	 */
533 	skb_reserve(skb, reserve);
534 	qdf_nbuf_count_inc(skb);
535 
536 	return skb;
537 }
538 #endif
539 qdf_export_symbol(__qdf_nbuf_alloc);
540 
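/*
 * Illustrative note (not driver code): callers normally reach __qdf_nbuf_alloc
 * through the qdf_nbuf_alloc() front-end in qdf_nbuf.h, which supplies
 * __func__ and __LINE__ (argument order assumed here: osdev, size, reserve,
 * align, prio). A minimal sketch with hypothetical sizes:
 *
 *	qdf_nbuf_t nbuf;
 *
 *	nbuf = qdf_nbuf_alloc(osdev, 2048, 64, 4, 0);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *
 * The allocator over-allocates by (align - 1) bytes, uses skb_reserve() to
 * align skb->data, then reserves 'reserve' bytes of headroom on top; as the
 * NOTE above says, that final reserve may re-misalign the data pointer.
 */
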
541 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
542 					  const char *func, uint32_t line)
543 {
544 	qdf_nbuf_t nbuf;
545 	unsigned long offset;
546 
547 	if (align)
548 		size += (align - 1);
549 
550 	nbuf = alloc_skb(size, GFP_ATOMIC);
551 	if (!nbuf)
552 		goto ret_nbuf;
553 
554 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
555 
556 	skb_reserve(nbuf, reserve);
557 
558 	if (align) {
559 		offset = ((unsigned long)nbuf->data) % align;
560 		if (offset)
561 			skb_reserve(nbuf, align - offset);
562 	}
563 
564 	qdf_nbuf_count_inc(nbuf);
565 
566 ret_nbuf:
567 	return nbuf;
568 }
569 
570 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
571 
572 /**
573  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
574  * @skb: Pointer to network buffer
575  *
576  * Return: none
577  */
578 
579 void __qdf_nbuf_free(struct sk_buff *skb)
580 {
581 	if (pld_nbuf_pre_alloc_free(skb))
582 		return;
583 
584 	qdf_nbuf_count_dec(skb);
585 	qdf_mem_skb_dec(skb->truesize);
586 	if (nbuf_free_cb)
587 		nbuf_free_cb(skb);
588 	else
589 		dev_kfree_skb_any(skb);
590 }
591 
592 qdf_export_symbol(__qdf_nbuf_free);
593 
594 #ifdef NBUF_MEMORY_DEBUG
595 enum qdf_nbuf_event_type {
596 	QDF_NBUF_ALLOC,
597 	QDF_NBUF_ALLOC_CLONE,
598 	QDF_NBUF_ALLOC_COPY,
599 	QDF_NBUF_ALLOC_FAILURE,
600 	QDF_NBUF_FREE,
601 	QDF_NBUF_MAP,
602 	QDF_NBUF_UNMAP,
603 	QDF_NBUF_ALLOC_COPY_EXPAND,
604 };
605 
606 struct qdf_nbuf_event {
607 	qdf_nbuf_t nbuf;
608 	char func[QDF_MEM_FUNC_NAME_SIZE];
609 	uint32_t line;
610 	enum qdf_nbuf_event_type type;
611 	uint64_t timestamp;
612 };
613 
614 #define QDF_NBUF_HISTORY_SIZE 4096
615 static qdf_atomic_t qdf_nbuf_history_index;
616 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
617 
618 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
619 {
620 	int32_t next = qdf_atomic_inc_return(index);
621 
622 	if (next == size)
623 		qdf_atomic_sub(size, index);
624 
625 	return next % size;
626 }
627 
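/*
 * Illustrative note (not driver code): qdf_nbuf_circular_index_next() turns a
 * monotonically increasing atomic counter into a ring index without a lock.
 * A worked example with QDF_NBUF_HISTORY_SIZE == 4096:
 *
 *	inc_return -> 4095 : 4095 % 4096 = 4095   (last slot)
 *	inc_return -> 4096 : counter is pulled back down by 4096,
 *	                     4096 % 4096 = 0      (wrap to first slot)
 *	inc_return -> 1    : 1 % 4096 = 1
 *
 * Concurrent callers may briefly observe values above 'size'; the modulo
 * keeps the returned slot in range while the subtraction bounds the counter.
 */
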
628 static void
629 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
630 		     enum qdf_nbuf_event_type type)
631 {
632 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
633 						   QDF_NBUF_HISTORY_SIZE);
634 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
635 
636 	event->nbuf = nbuf;
637 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
638 	event->line = line;
639 	event->type = type;
640 	event->timestamp = qdf_get_log_timestamp();
641 }
642 #endif /* NBUF_MEMORY_DEBUG */
643 
644 #ifdef NBUF_MAP_UNMAP_DEBUG
645 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
646 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
647 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
648 
649 static void qdf_nbuf_map_tracking_init(void)
650 {
651 	qdf_tracker_init(&qdf_nbuf_map_tracker);
652 }
653 
654 static void qdf_nbuf_map_tracking_deinit(void)
655 {
656 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
657 }
658 
659 static QDF_STATUS
660 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
661 {
662 	QDF_STATUS status;
663 
664 	if (is_initial_mem_debug_disabled)
665 		return QDF_STATUS_SUCCESS;
666 
667 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
668 	if (QDF_IS_STATUS_ERROR(status))
669 		return status;
670 
671 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
672 
673 	return QDF_STATUS_SUCCESS;
674 }
675 
676 static void
677 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
678 {
679 	if (is_initial_mem_debug_disabled)
680 		return;
681 
682 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
683 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
684 }
685 
686 void qdf_nbuf_map_check_for_leaks(void)
687 {
688 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
689 }
690 
691 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
692 			      qdf_nbuf_t buf,
693 			      qdf_dma_dir_t dir,
694 			      const char *func,
695 			      uint32_t line)
696 {
697 	QDF_STATUS status;
698 
699 	status = qdf_nbuf_track_map(buf, func, line);
700 	if (QDF_IS_STATUS_ERROR(status))
701 		return status;
702 
703 	status = __qdf_nbuf_map(osdev, buf, dir);
704 	if (QDF_IS_STATUS_ERROR(status))
705 		qdf_nbuf_untrack_map(buf, func, line);
706 	else
707 		qdf_net_buf_debug_update_map_node(buf, func, line);
708 
709 	return status;
710 }
711 
712 qdf_export_symbol(qdf_nbuf_map_debug);
713 
714 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
715 			  qdf_nbuf_t buf,
716 			  qdf_dma_dir_t dir,
717 			  const char *func,
718 			  uint32_t line)
719 {
720 	qdf_nbuf_untrack_map(buf, func, line);
721 	__qdf_nbuf_unmap_single(osdev, buf, dir);
722 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
723 }
724 
725 qdf_export_symbol(qdf_nbuf_unmap_debug);
726 
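/*
 * Illustrative note (not driver code): with NBUF_MAP_UNMAP_DEBUG enabled,
 * the wrappers above record every DMA mapping in qdf_nbuf_map_tracker, so a
 * map without a matching unmap is reported by qdf_nbuf_map_check_for_leaks().
 * A minimal sketch of the expected pairing, assuming the usual
 * qdf_nbuf_map()/qdf_nbuf_unmap() front-end macros:
 *
 *	if (QDF_IS_STATUS_ERROR(qdf_nbuf_map(osdev, nbuf, QDF_DMA_TO_DEVICE)))
 *		return QDF_STATUS_E_FAILURE;
 *	// ... hand the buffer to hardware, wait for completion ...
 *	qdf_nbuf_unmap(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */
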
727 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
728 				     qdf_nbuf_t buf,
729 				     qdf_dma_dir_t dir,
730 				     const char *func,
731 				     uint32_t line)
732 {
733 	QDF_STATUS status;
734 
735 	status = qdf_nbuf_track_map(buf, func, line);
736 	if (QDF_IS_STATUS_ERROR(status))
737 		return status;
738 
739 	status = __qdf_nbuf_map_single(osdev, buf, dir);
740 	if (QDF_IS_STATUS_ERROR(status))
741 		qdf_nbuf_untrack_map(buf, func, line);
742 	else
743 		qdf_net_buf_debug_update_map_node(buf, func, line);
744 
745 	return status;
746 }
747 
748 qdf_export_symbol(qdf_nbuf_map_single_debug);
749 
750 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
751 				 qdf_nbuf_t buf,
752 				 qdf_dma_dir_t dir,
753 				 const char *func,
754 				 uint32_t line)
755 {
756 	qdf_nbuf_untrack_map(buf, func, line);
757 	__qdf_nbuf_unmap_single(osdev, buf, dir);
758 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
759 }
760 
761 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
762 
763 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
764 				     qdf_nbuf_t buf,
765 				     qdf_dma_dir_t dir,
766 				     int nbytes,
767 				     const char *func,
768 				     uint32_t line)
769 {
770 	QDF_STATUS status;
771 
772 	status = qdf_nbuf_track_map(buf, func, line);
773 	if (QDF_IS_STATUS_ERROR(status))
774 		return status;
775 
776 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
777 	if (QDF_IS_STATUS_ERROR(status))
778 		qdf_nbuf_untrack_map(buf, func, line);
779 	else
780 		qdf_net_buf_debug_update_map_node(buf, func, line);
781 
782 	return status;
783 }
784 
785 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
786 
787 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
788 				 qdf_nbuf_t buf,
789 				 qdf_dma_dir_t dir,
790 				 int nbytes,
791 				 const char *func,
792 				 uint32_t line)
793 {
794 	qdf_nbuf_untrack_map(buf, func, line);
795 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
796 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
797 }
798 
799 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
800 
801 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
802 					    qdf_nbuf_t buf,
803 					    qdf_dma_dir_t dir,
804 					    int nbytes,
805 					    const char *func,
806 					    uint32_t line)
807 {
808 	QDF_STATUS status;
809 
810 	status = qdf_nbuf_track_map(buf, func, line);
811 	if (QDF_IS_STATUS_ERROR(status))
812 		return status;
813 
814 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
815 	if (QDF_IS_STATUS_ERROR(status))
816 		qdf_nbuf_untrack_map(buf, func, line);
817 	else
818 		qdf_net_buf_debug_update_map_node(buf, func, line);
819 
820 	return status;
821 }
822 
823 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
824 
825 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
826 					qdf_nbuf_t buf,
827 					qdf_dma_dir_t dir,
828 					int nbytes,
829 					const char *func,
830 					uint32_t line)
831 {
832 	qdf_nbuf_untrack_map(buf, func, line);
833 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
834 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
835 }
836 
837 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
838 
839 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
840 					     const char *func,
841 					     uint32_t line)
842 {
843 	char map_func[QDF_TRACKER_FUNC_SIZE];
844 	uint32_t map_line;
845 
846 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
847 				&map_func, &map_line))
848 		return;
849 
850 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
851 			   func, line, map_func, map_line);
852 }
853 #else
854 static inline void qdf_nbuf_map_tracking_init(void)
855 {
856 }
857 
858 static inline void qdf_nbuf_map_tracking_deinit(void)
859 {
860 }
861 
862 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
863 						    const char *func,
864 						    uint32_t line)
865 {
866 }
867 #endif /* NBUF_MAP_UNMAP_DEBUG */
868 
869 /**
870  * __qdf_nbuf_map() - map a buffer to local bus address space
871  * @osdev: OS device
873  * @skb: Pointer to network buffer
874  * @dir: Direction
875  *
876  * Return: QDF_STATUS
877  */
878 #ifdef QDF_OS_DEBUG
879 QDF_STATUS
880 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
881 {
882 	struct skb_shared_info *sh = skb_shinfo(skb);
883 
884 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
885 			|| (dir == QDF_DMA_FROM_DEVICE));
886 
887 	/*
888 	 * Assume there's only a single fragment.
889 	 * To support multiple fragments, it would be necessary to change
890 	 * qdf_nbuf_t to be a separate object that stores meta-info
891 	 * (including the bus address for each fragment) and a pointer
892 	 * to the underlying sk_buff.
893 	 */
894 	qdf_assert(sh->nr_frags == 0);
895 
896 	return __qdf_nbuf_map_single(osdev, skb, dir);
897 }
898 qdf_export_symbol(__qdf_nbuf_map);
899 
900 #else
901 QDF_STATUS
902 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
903 {
904 	return __qdf_nbuf_map_single(osdev, skb, dir);
905 }
906 qdf_export_symbol(__qdf_nbuf_map);
907 #endif
908 /**
909  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
910  * @osdev: OS device
911  * @skb: Pointer to network buffer
912  * @dir: dma direction
913  *
914  * Return: none
915  */
916 void
917 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
918 			qdf_dma_dir_t dir)
919 {
920 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
921 		   || (dir == QDF_DMA_FROM_DEVICE));
922 
923 	/*
924 	 * Assume there's a single fragment.
925 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
926 	 */
927 	__qdf_nbuf_unmap_single(osdev, skb, dir);
928 }
929 qdf_export_symbol(__qdf_nbuf_unmap);
930 
931 /**
932  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
933  * @osdev: OS device
934  * @skb: Pointer to network buffer
935  * @dir: Direction
936  *
937  * Return: QDF_STATUS
938  */
939 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
940 QDF_STATUS
941 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
942 {
943 	qdf_dma_addr_t paddr;
944 
945 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
946 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
947 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
948 	return QDF_STATUS_SUCCESS;
949 }
950 qdf_export_symbol(__qdf_nbuf_map_single);
951 #else
952 QDF_STATUS
953 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
954 {
955 	qdf_dma_addr_t paddr;
956 
957 	/* assume that the OS only provides a single fragment */
958 	QDF_NBUF_CB_PADDR(buf) = paddr =
959 		dma_map_single(osdev->dev, buf->data,
960 				skb_end_pointer(buf) - buf->data,
961 				__qdf_dma_dir_to_os(dir));
962 	return dma_mapping_error(osdev->dev, paddr)
963 		? QDF_STATUS_E_FAILURE
964 		: QDF_STATUS_SUCCESS;
965 }
966 qdf_export_symbol(__qdf_nbuf_map_single);
967 #endif
968 /**
969  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
970  * @osdev: OS device
971  * @buf: Pointer to network buffer
972  * @dir: Direction
973  *
974  * Return: none
975  */
976 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
977 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
978 				qdf_dma_dir_t dir)
979 {
980 }
981 #else
982 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
983 					qdf_dma_dir_t dir)
984 {
985 	if (QDF_NBUF_CB_PADDR(buf))
986 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
987 			skb_end_pointer(buf) - buf->data,
988 			__qdf_dma_dir_to_os(dir));
989 }
990 #endif
991 qdf_export_symbol(__qdf_nbuf_unmap_single);
992 
993 /**
994  * __qdf_nbuf_set_rx_cksum() - set rx checksum
995  * @skb: Pointer to network buffer
996  * @cksum: Pointer to checksum value
997  *
998  * Return: QDF_STATUS
999  */
1000 QDF_STATUS
1001 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1002 {
1003 	switch (cksum->l4_result) {
1004 	case QDF_NBUF_RX_CKSUM_NONE:
1005 		skb->ip_summed = CHECKSUM_NONE;
1006 		break;
1007 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1008 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1009 		break;
1010 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1011 		skb->ip_summed = CHECKSUM_PARTIAL;
1012 		skb->csum = cksum->val;
1013 		break;
1014 	default:
1015 		pr_err("Unknown checksum type\n");
1016 		qdf_assert(0);
1017 		return QDF_STATUS_E_NOSUPPORT;
1018 	}
1019 	return QDF_STATUS_SUCCESS;
1020 }
1021 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1022 
1023 /**
1024  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1025  * @skb: Pointer to network buffer
1026  *
1027  * Return: TX checksum value
1028  */
1029 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1030 {
1031 	switch (skb->ip_summed) {
1032 	case CHECKSUM_NONE:
1033 		return QDF_NBUF_TX_CKSUM_NONE;
1034 	case CHECKSUM_PARTIAL:
1035 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1036 	case CHECKSUM_COMPLETE:
1037 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1038 	default:
1039 		return QDF_NBUF_TX_CKSUM_NONE;
1040 	}
1041 }
1042 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1043 
1044 /**
1045  * __qdf_nbuf_get_tid() - get tid
1046  * @skb: Pointer to network buffer
1047  *
1048  * Return: tid
1049  */
1050 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1051 {
1052 	return skb->priority;
1053 }
1054 qdf_export_symbol(__qdf_nbuf_get_tid);
1055 
1056 /**
1057  * __qdf_nbuf_set_tid() - set tid
1058  * @skb: Pointer to network buffer
1058  * @tid: TID value to set
1059  *
1060  * Return: none
1061  */
1062 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1063 {
1064 	skb->priority = tid;
1065 }
1066 qdf_export_symbol(__qdf_nbuf_set_tid);
1067 
1068 /**
1069  * __qdf_nbuf_get_exemption_type() - get exemption type
1070  * @skb: Pointer to network buffer
1071  *
1072  * Return: exemption type
1073  */
1074 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1075 {
1076 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1077 }
1078 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1079 
1080 /**
1081  * __qdf_nbuf_reg_trace_cb() - register trace callback
1082  * @cb_func_ptr: Pointer to trace callback function
1083  *
1084  * Return: none
1085  */
1086 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1087 {
1088 	qdf_trace_update_cb = cb_func_ptr;
1089 }
1090 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1091 
1092 /**
1093  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1094  *              of DHCP packet.
1095  * @data: Pointer to DHCP packet data buffer
1096  *
1097  * This func. returns the subtype of DHCP packet.
1098  *
1099  * Return: subtype of the DHCP packet.
1100  */
1101 enum qdf_proto_subtype
1102 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1103 {
1104 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1105 
1106 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1107 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1108 					QDF_DHCP_OPTION53_LENGTH)) {
1109 
1110 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1111 		case QDF_DHCP_DISCOVER:
1112 			subtype = QDF_PROTO_DHCP_DISCOVER;
1113 			break;
1114 		case QDF_DHCP_REQUEST:
1115 			subtype = QDF_PROTO_DHCP_REQUEST;
1116 			break;
1117 		case QDF_DHCP_OFFER:
1118 			subtype = QDF_PROTO_DHCP_OFFER;
1119 			break;
1120 		case QDF_DHCP_ACK:
1121 			subtype = QDF_PROTO_DHCP_ACK;
1122 			break;
1123 		case QDF_DHCP_NAK:
1124 			subtype = QDF_PROTO_DHCP_NACK;
1125 			break;
1126 		case QDF_DHCP_RELEASE:
1127 			subtype = QDF_PROTO_DHCP_RELEASE;
1128 			break;
1129 		case QDF_DHCP_INFORM:
1130 			subtype = QDF_PROTO_DHCP_INFORM;
1131 			break;
1132 		case QDF_DHCP_DECLINE:
1133 			subtype = QDF_PROTO_DHCP_DECLINE;
1134 			break;
1135 		default:
1136 			break;
1137 		}
1138 	}
1139 
1140 	return subtype;
1141 }
1142 
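/*
 * Illustrative note (not driver code): the parser above keys off DHCP option
 * 53 (message type), encoded as <53, length 1, type>, and assumes the option
 * sits at the fixed QDF_DHCP_OPTION53_* offsets. For a DHCP Discover the
 * relevant bytes would be:
 *
 *	data[QDF_DHCP_OPTION53_OFFSET]        == 53   // option code
 *	data[QDF_DHCP_OPTION53_LENGTH_OFFSET] == 1    // option length
 *	data[QDF_DHCP_OPTION53_STATUS_OFFSET] == 1    // DHCPDISCOVER
 *
 * Message-type values 1..8 map to Discover, Offer, Request, Decline, ACK,
 * NAK, Release and Inform respectively (RFC 2132).
 */
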
1143 /**
1144  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1145  *            of EAPOL packet.
1146  * @data: Pointer to EAPOL packet data buffer
1147  *
1148  * This func. returns the subtype of EAPOL packet.
1149  *
1150  * Return: subtype of the EAPOL packet.
1151  */
1152 enum qdf_proto_subtype
1153 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1154 {
1155 	uint16_t eapol_key_info;
1156 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1157 	uint16_t mask;
1158 
1159 	eapol_key_info = (uint16_t)(*(uint16_t *)
1160 			(data + EAPOL_KEY_INFO_OFFSET));
1161 
1162 	mask = eapol_key_info & EAPOL_MASK;
1163 	switch (mask) {
1164 	case EAPOL_M1_BIT_MASK:
1165 		subtype = QDF_PROTO_EAPOL_M1;
1166 		break;
1167 	case EAPOL_M2_BIT_MASK:
1168 		subtype = QDF_PROTO_EAPOL_M2;
1169 		break;
1170 	case EAPOL_M3_BIT_MASK:
1171 		subtype = QDF_PROTO_EAPOL_M3;
1172 		break;
1173 	case EAPOL_M4_BIT_MASK:
1174 		subtype = QDF_PROTO_EAPOL_M4;
1175 		break;
1176 	default:
1177 		break;
1178 	}
1179 
1180 	return subtype;
1181 }
1182 
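/*
 * Illustrative note (not driver code): the classification above reads the
 * 16-bit EAPOL-Key "Key Information" field at EAPOL_KEY_INFO_OFFSET, masks it
 * with EAPOL_MASK and compares against per-message patterns. The idea,
 * sketched for the 4-way handshake, is that each message carries a distinct
 * combination of the Key MIC / Key ACK / Secure / Install bits:
 *
 *	M1: ACK set, MIC clear
 *	M2: MIC set, ACK clear, Secure clear
 *	M3: ACK, MIC, Install and Secure set
 *	M4: MIC set, Secure set, ACK clear
 *
 * The exact EAPOL_M*_BIT_MASK encodings live in the QDF headers.
 */
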
1183 /**
1184  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1185  *            of ARP packet.
1186  * @data: Pointer to ARP packet data buffer
1187  *
1188  * This func. returns the subtype of ARP packet.
1189  *
1190  * Return: subtype of the ARP packet.
1191  */
1192 enum qdf_proto_subtype
1193 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1194 {
1195 	uint16_t subtype;
1196 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1197 
1198 	subtype = (uint16_t)(*(uint16_t *)
1199 			(data + ARP_SUB_TYPE_OFFSET));
1200 
1201 	switch (QDF_SWAP_U16(subtype)) {
1202 	case ARP_REQUEST:
1203 		proto_subtype = QDF_PROTO_ARP_REQ;
1204 		break;
1205 	case ARP_RESPONSE:
1206 		proto_subtype = QDF_PROTO_ARP_RES;
1207 		break;
1208 	default:
1209 		break;
1210 	}
1211 
1212 	return proto_subtype;
1213 }
1214 
1215 /**
1216  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1217  *            of IPV4 ICMP packet.
1218  * @data: Pointer to IPV4 ICMP packet data buffer
1219  *
1220  * This func. returns the subtype of ICMP packet.
1221  *
1222  * Return: subtype of the ICMP packet.
1223  */
1224 enum qdf_proto_subtype
1225 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1226 {
1227 	uint8_t subtype;
1228 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1229 
1230 	subtype = (uint8_t)(*(uint8_t *)
1231 			(data + ICMP_SUBTYPE_OFFSET));
1232 
1233 	switch (subtype) {
1234 	case ICMP_REQUEST:
1235 		proto_subtype = QDF_PROTO_ICMP_REQ;
1236 		break;
1237 	case ICMP_RESPONSE:
1238 		proto_subtype = QDF_PROTO_ICMP_RES;
1239 		break;
1240 	default:
1241 		break;
1242 	}
1243 
1244 	return proto_subtype;
1245 }
1246 
1247 /**
1248  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1249  *            of IPV6 ICMPV6 packet.
1250  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1251  *
1252  * This func. returns the subtype of ICMPV6 packet.
1253  *
1254  * Return: subtype of the ICMPV6 packet.
1255  */
1256 enum qdf_proto_subtype
1257 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1258 {
1259 	uint8_t subtype;
1260 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1261 
1262 	subtype = (uint8_t)(*(uint8_t *)
1263 			(data + ICMPV6_SUBTYPE_OFFSET));
1264 
1265 	switch (subtype) {
1266 	case ICMPV6_REQUEST:
1267 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1268 		break;
1269 	case ICMPV6_RESPONSE:
1270 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1271 		break;
1272 	case ICMPV6_RS:
1273 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1274 		break;
1275 	case ICMPV6_RA:
1276 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1277 		break;
1278 	case ICMPV6_NS:
1279 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1280 		break;
1281 	case ICMPV6_NA:
1282 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1283 		break;
1284 	default:
1285 		break;
1286 	}
1287 
1288 	return proto_subtype;
1289 }
1290 
1291 /**
1292  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1293  *            of IPV4 packet.
1294  * @data: Pointer to IPV4 packet data buffer
1295  *
1296  * This func. returns the proto type of IPV4 packet.
1297  *
1298  * Return: proto type of IPV4 packet.
1299  */
1300 uint8_t
1301 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1302 {
1303 	uint8_t proto_type;
1304 
1305 	proto_type = (uint8_t)(*(uint8_t *)(data +
1306 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1307 	return proto_type;
1308 }
1309 
1310 /**
1311  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1312  *            of IPV6 packet.
1313  * @data: Pointer to IPV6 packet data buffer
1314  *
1315  * This func. returns the proto type of IPV6 packet.
1316  *
1317  * Return: proto type of IPV6 packet.
1318  */
1319 uint8_t
1320 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1321 {
1322 	uint8_t proto_type;
1323 
1324 	proto_type = (uint8_t)(*(uint8_t *)(data +
1325 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1326 	return proto_type;
1327 }
1328 
1329 /**
1330  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1331  * @data: Pointer to network data
1332  *
1333  * This api is for Tx packets.
1334  *
1335  * Return: true if packet is ipv4 packet
1336  *	   false otherwise
1337  */
1338 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1339 {
1340 	uint16_t ether_type;
1341 
1342 	ether_type = (uint16_t)(*(uint16_t *)(data +
1343 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1344 
1345 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1346 		return true;
1347 	else
1348 		return false;
1349 }
1350 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1351 
1352 /**
1353  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1354  * @data: Pointer to network data buffer
1355  *
1356  * This api is for ipv4 packet.
1357  *
1358  * Return: true if packet is DHCP packet
1359  *	   false otherwise
1360  */
1361 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1362 {
1363 	uint16_t sport;
1364 	uint16_t dport;
1365 
1366 	sport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1367 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE));
1368 	dport = (uint16_t)(*(uint16_t *)(data + QDF_NBUF_TRAC_IPV4_OFFSET +
1369 					 QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
1370 					 sizeof(uint16_t)));
1371 
1372 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1373 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1374 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1375 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1376 		return true;
1377 	else
1378 		return false;
1379 }
1380 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1381 
1382 /**
1383  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1384  * @data: Pointer to network data buffer
1385  *
1386  * This api is for ipv4 packet.
1387  *
1388  * Return: true if packet is EAPOL packet
1389  *	   false otherwise.
1390  */
1391 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1392 {
1393 	uint16_t ether_type;
1394 
1395 	ether_type = (uint16_t)(*(uint16_t *)(data +
1396 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1397 
1398 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1399 		return true;
1400 	else
1401 		return false;
1402 }
1403 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1404 
1405 /**
1406  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1407  * @skb: Pointer to network buffer
1408  *
1409  * This api is for ipv4 packet.
1410  *
1411  * Return: true if packet is WAPI packet
1412  *	   false otherwise.
1413  */
1414 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1415 {
1416 	uint16_t ether_type;
1417 
1418 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1419 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1420 
1421 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1422 		return true;
1423 	else
1424 		return false;
1425 }
1426 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1427 
1428 /**
1429  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1430  * @skb: Pointer to network buffer
1431  *
1432  * This api is for ipv4 packet.
1433  *
1434  * Return: true if packet is tdls packet
1435  *	   false otherwise.
1436  */
1437 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1438 {
1439 	uint16_t ether_type;
1440 
1441 	ether_type = *(uint16_t *)(skb->data +
1442 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1443 
1444 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1445 		return true;
1446 	else
1447 		return false;
1448 }
1449 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1450 
1451 /**
1452  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1453  * @data: Pointer to network data buffer
1454  *
1455  * This api is for ipv4 packet.
1456  *
1457  * Return: true if packet is ARP packet
1458  *	   false otherwise.
1459  */
1460 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1461 {
1462 	uint16_t ether_type;
1463 
1464 	ether_type = (uint16_t)(*(uint16_t *)(data +
1465 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1466 
1467 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1468 		return true;
1469 	else
1470 		return false;
1471 }
1472 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1473 
1474 /**
1475  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1476  * @data: Pointer to network data buffer
1477  *
1478  * This api is for ipv4 packet.
1479  *
1480  * Return: true if packet is ARP request
1481  *	   false otherwise.
1482  */
1483 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1484 {
1485 	uint16_t op_code;
1486 
1487 	op_code = (uint16_t)(*(uint16_t *)(data +
1488 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1489 
1490 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1491 		return true;
1492 	return false;
1493 }
1494 
1495 /**
1496  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1497  * @data: Pointer to network data buffer
1498  *
1499  * This api is for ipv4 packet.
1500  *
1501  * Return: true if packet is ARP response
1502  *	   false otherwise.
1503  */
1504 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1505 {
1506 	uint16_t op_code;
1507 
1508 	op_code = (uint16_t)(*(uint16_t *)(data +
1509 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1510 
1511 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1512 		return true;
1513 	return false;
1514 }
1515 
1516 /**
1517  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1518  * @data: Pointer to network data buffer
1519  *
1520  * This api is for ipv4 packet.
1521  *
1522  * Return: ARP packet source IP value.
1523  */
1524 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1525 {
1526 	uint32_t src_ip;
1527 
1528 	src_ip = (uint32_t)(*(uint32_t *)(data +
1529 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1530 
1531 	return src_ip;
1532 }
1533 
1534 /**
1535  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1536  * @data: Pointer to network data buffer
1537  *
1538  * This api is for ipv4 packet.
1539  *
1540  * Return: ARP packet target IP value.
1541  */
1542 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1543 {
1544 	uint32_t tgt_ip;
1545 
1546 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1547 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1548 
1549 	return tgt_ip;
1550 }
1551 
1552 /**
1553  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1554  * @data: Pointer to network data buffer
1555  * @len: length to copy
1556  *
1557  * This api is for dns domain name
1558  *
1559  * Return: dns domain name.
1560  */
1561 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1562 {
1563 	uint8_t *domain_name;
1564 
1565 	domain_name = (uint8_t *)
1566 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1567 	return domain_name;
1568 }
1569 
1570 
1571 /**
1572  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1573  * @data: Pointer to network data buffer
1574  *
1575  * This api is for dns query packet.
1576  *
1577  * Return: true if packet is dns query packet.
1578  *	   false otherwise.
1579  */
1580 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1581 {
1582 	uint16_t op_code;
1583 	uint16_t tgt_port;
1584 
1585 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1586 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1587 	/* Standard DNS query always happens on Dest Port 53. */
1588 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1589 		op_code = (uint16_t)(*(uint16_t *)(data +
1590 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1591 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1592 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1593 			return true;
1594 	}
1595 	return false;
1596 }
1597 
1598 /**
1599  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1600  * @data: Pointer to network data buffer
1601  *
1602  * This api is for dns query response.
1603  *
1604  * Return: true if packet is dns response packet.
1605  *	   false otherwise.
1606  */
1607 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1608 {
1609 	uint16_t op_code;
1610 	uint16_t src_port;
1611 
1612 	src_port = (uint16_t)(*(uint16_t *)(data +
1613 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1614 	/* Standard DNS response always comes on Src Port 53. */
1615 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1616 		op_code = (uint16_t)(*(uint16_t *)(data +
1617 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1618 
1619 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1620 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1621 			return true;
1622 	}
1623 	return false;
1624 }
1625 
1626 /**
1627  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1628  * @data: Pointer to network data buffer
1629  *
1630  * This api is for tcp syn packet.
1631  *
1632  * Return: true if packet is tcp syn packet.
1633  *	   false otherwise.
1634  */
1635 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1636 {
1637 	uint8_t op_code;
1638 
1639 	op_code = (uint8_t)(*(uint8_t *)(data +
1640 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1641 
1642 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1643 		return true;
1644 	return false;
1645 }
1646 
1647 /**
1648  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1649  * @data: Pointer to network data buffer
1650  *
1651  * This api is for tcp syn ack packet.
1652  *
1653  * Return: true if packet is tcp syn ack packet.
1654  *	   false otherwise.
1655  */
1656 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1657 {
1658 	uint8_t op_code;
1659 
1660 	op_code = (uint8_t)(*(uint8_t *)(data +
1661 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1662 
1663 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1664 		return true;
1665 	return false;
1666 }
1667 
1668 /**
1669  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1670  * @data: Pointer to network data buffer
1671  *
1672  * This api is for tcp ack packet.
1673  *
1674  * Return: true if packet is tcp ack packet.
1675  *	   false otherwise.
1676  */
1677 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1678 {
1679 	uint8_t op_code;
1680 
1681 	op_code = (uint8_t)(*(uint8_t *)(data +
1682 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1683 
1684 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1685 		return true;
1686 	return false;
1687 }
1688 
1689 /**
1690  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1691  * @data: Pointer to network data buffer
1692  *
1693  * This api is for tcp packet.
1694  *
1695  * Return: tcp source port value.
1696  */
1697 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1698 {
1699 	uint16_t src_port;
1700 
1701 	src_port = (uint16_t)(*(uint16_t *)(data +
1702 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1703 
1704 	return src_port;
1705 }
1706 
1707 /**
1708  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1709  * @data: Pointer to network data buffer
1710  *
1711  * This api is for tcp packet.
1712  *
1713  * Return: tcp destination port value.
1714  */
1715 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1716 {
1717 	uint16_t tgt_port;
1718 
1719 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1720 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1721 
1722 	return tgt_port;
1723 }
1724 
1725 /**
1726  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1727  * @data: Pointer to network data buffer
1728  *
1729  * This api is for ipv4 req packet.
1730  *
1731  * Return: true if packet is icmpv4 request
1732  *	   false otherwise.
1733  */
1734 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1735 {
1736 	uint8_t op_code;
1737 
1738 	op_code = (uint8_t)(*(uint8_t *)(data +
1739 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1740 
1741 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1742 		return true;
1743 	return false;
1744 }
1745 
1746 /**
1747  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1748  * @data: Pointer to network data buffer
1749  *
1750  * This api is for ipv4 res packet.
1751  *
1752  * Return: true if packet is icmpv4 response
1753  *	   false otherwise.
1754  */
1755 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1756 {
1757 	uint8_t op_code;
1758 
1759 	op_code = (uint8_t)(*(uint8_t *)(data +
1760 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1761 
1762 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1763 		return true;
1764 	return false;
1765 }
1766 
1767 /**
1768  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1769  * @data: Pointer to network data buffer
1770  *
1771  * This api is for ipv4 packet.
1772  *
1773  * Return: icmpv4 packet source IP value.
1774  */
1775 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1776 {
1777 	uint32_t src_ip;
1778 
1779 	src_ip = (uint32_t)(*(uint32_t *)(data +
1780 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1781 
1782 	return src_ip;
1783 }
1784 
1785 /**
1786  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1787  * @data: Pointer to network data buffer
1788  *
1789  * This api is for ipv4 packet.
1790  *
1791  * Return: icmpv4 packet target IP value.
1792  */
1793 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1794 {
1795 	uint32_t tgt_ip;
1796 
1797 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1798 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1799 
1800 	return tgt_ip;
1801 }
1802 
1803 
1804 /**
1805  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1806  * @data: Pointer to IPV6 packet data buffer
1807  *
1808  * This func. checks whether it is an IPV6 packet or not.
1809  *
1810  * Return: TRUE if it is an IPV6 packet
1811  *         FALSE if not
1812  */
1813 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1814 {
1815 	uint16_t ether_type;
1816 
1817 	ether_type = (uint16_t)(*(uint16_t *)(data +
1818 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1819 
1820 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1821 		return true;
1822 	else
1823 		return false;
1824 }
1825 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1826 
1827 /**
1828  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1829  * @data: Pointer to network data buffer
1830  *
1831  * This api is for ipv6 packet.
1832  *
1833  * Return: true if packet is DHCP packet
1834  *	   false otherwise
1835  */
1836 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1837 {
1838 	uint16_t sport;
1839 	uint16_t dport;
1840 
1841 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1842 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1843 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1844 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1845 					sizeof(uint16_t));
1846 
1847 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1848 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1849 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1850 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1851 		return true;
1852 	else
1853 		return false;
1854 }
1855 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1856 
1857 /**
1858  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
1859  * @data: Pointer to network data buffer
1860  *
1861  * This api is for ipv6 packet.
1862  *
1863  * Return: true if packet is MDNS packet
1864  *	   false otherwise
1865  */
1866 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
1867 {
1868 	uint16_t sport;
1869 	uint16_t dport;
1870 
1871 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1872 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1873 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
1874 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1875 					sizeof(uint16_t));
1876 
1877 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
1878 	    dport == sport)
1879 		return true;
1880 	else
1881 		return false;
1882 }
1883 
1884 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
1885 
1886 /**
1887  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet.
1888  * @data: Pointer to IPV4 packet data buffer
1889  *
1890  * This func. checks whether it is an IPV4 multicast packet or not.
1891  *
1892  * Return: TRUE if it is an IPV4 multicast packet
1893  *         FALSE if not
1894  */
1895 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
1896 {
1897 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1898 		uint32_t *dst_addr =
1899 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
1900 
1901 		/*
1902 		 * Check the top nibble of the IPV4 destination address and if
1903 		 * it is 0xE then it is a multicast IP (224.0.0.0/4).
1904 		 */
1905 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
1906 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
1907 			return true;
1908 		else
1909 			return false;
1910 	} else
1911 		return false;
1912 }
1913 
1914 /**
1915  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet.
1916  * @data: Pointer to IPV6 packet data buffer
1917  *
1918  * This func. checks whether it is an IPV6 multicast packet or not.
1919  *
1920  * Return: TRUE if it is an IPV6 multicast packet
1921  *         FALSE if not
1922  */
1923 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
1924 {
1925 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1926 		uint16_t *dst_addr;
1927 
1928 		dst_addr = (uint16_t *)
1929 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
1930 
1931 		/*
1932 		 * Check the first two bytes of the IP address and if they are
1933 		 * 0xFF00 then it is an IPV6 mcast packet.
1934 		 */
1935 		if (*dst_addr ==
1936 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
1937 			return true;
1938 		else
1939 			return false;
1940 	} else
1941 		return false;
1942 }
1943 
1944 /**
1945  * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet.
1946  * @data: Pointer to IPV4 ICMP packet data buffer
1947  *
1948  * This func. checks whether it is an ICMP packet or not.
1949  *
1950  * Return: TRUE if it is an ICMP packet
1951  *         FALSE if not
1952  */
1953 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
1954 {
1955 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1956 		uint8_t pkt_type;
1957 
1958 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1959 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1960 
1961 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
1962 			return true;
1963 		else
1964 			return false;
1965 	} else
1966 		return false;
1967 }
1968 
1969 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
1970 
1971 /**
1972  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet.
1973  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1974  *
1975  * This func. checks whether it is an ICMPV6 packet or not.
1976  *
1977  * Return: TRUE if it is an ICMPV6 packet
1978  *         FALSE if not
1979  */
1980 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
1981 {
1982 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1983 		uint8_t pkt_type;
1984 
1985 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1986 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1987 
1988 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1989 			return true;
1990 		else
1991 			return false;
1992 	} else
1993 		return false;
1994 }
1995 
1996 /**
1997  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
1998  * @data: Pointer to IPV4 UDP packet data buffer
1999  *
2000  * This function checks whether it is an IPV4 UDP packet or not.
2001  *
2002  * Return: TRUE if it is an IPV4 UDP packet
2003  *         FALSE if not
2004  */
2005 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2006 {
2007 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2008 		uint8_t pkt_type;
2009 
2010 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2011 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2012 
2013 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2014 			return true;
2015 		else
2016 			return false;
2017 	} else
2018 		return false;
2019 }
2020 
2021 /**
2022  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2023  * @data: Pointer to IPV4 TCP packet data buffer
2024  *
2025  * This function checks whether it is an IPV4 TCP packet or not.
2026  *
2027  * Return: TRUE if it is an IPV4 TCP packet
2028  *         FALSE if not
2029  */
2030 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2031 {
2032 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2033 		uint8_t pkt_type;
2034 
2035 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2036 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2037 
2038 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2039 			return true;
2040 		else
2041 			return false;
2042 	} else
2043 		return false;
2044 }
2045 
2046 /**
2047  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2048  * @data: Pointer to IPV6 UDP packet data buffer
2049  *
2050  * This function checks whether it is an IPV6 UDP packet or not.
2051  *
2052  * Return: TRUE if it is an IPV6 UDP packet
2053  *         FALSE if not
2054  */
2055 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2056 {
2057 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2058 		uint8_t pkt_type;
2059 
2060 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2061 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2062 
2063 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2064 			return true;
2065 		else
2066 			return false;
2067 	} else
2068 		return false;
2069 }
2070 
2071 /**
2072  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2073  * @data: Pointer to IPV6 TCP packet data buffer
2074  *
2075  * This function checks whether it is an IPV6 TCP packet or not.
2076  *
2077  * Return: TRUE if it is an IPV6 TCP packet
2078  *         FALSE if not
2079  */
2080 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2081 {
2082 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2083 		uint8_t pkt_type;
2084 
2085 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2086 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2087 
2088 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2089 			return true;
2090 		else
2091 			return false;
2092 	} else
2093 		return false;
2094 }
2095 
2096 /**
2097  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2098  * @nbuf: sk buff
2099  *
2100  * Return: true if packet is broadcast
2101  *	   false otherwise
2102  */
2103 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2104 {
2105 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2106 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2107 }
2108 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
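
/*
 * The classification helpers above are typically composed by rx/tx
 * inspection paths. A minimal illustrative sketch (hypothetical caller,
 * not part of this file) that tags a frame from its raw data pointer:
 *
 *	static void example_classify_frame(qdf_nbuf_t nbuf)
 *	{
 *		uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *		if (__qdf_nbuf_is_bcast_pkt(nbuf))
 *			pr_debug("broadcast frame\n");
 *		else if (__qdf_nbuf_data_is_ipv4_mcast_pkt(data) ||
 *			 __qdf_nbuf_data_is_ipv6_mcast_pkt(data))
 *			pr_debug("multicast frame\n");
 *		else if (__qdf_nbuf_data_is_ipv4_tcp_pkt(data) ||
 *			 __qdf_nbuf_data_is_ipv6_tcp_pkt(data))
 *			pr_debug("unicast TCP frame\n");
 *	}
 */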
2109 
2110 #ifdef NBUF_MEMORY_DEBUG
2111 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
2112 
2113 /**
2114  * struct qdf_nbuf_track_t - Network buffer track structure
2115  *
2116  * @p_next: Pointer to next
2117  * @net_buf: Pointer to network buffer
2118  * @func_name: Function name
2119  * @line_num: Line number
2120  * @size: Size
2121  * @map_func_name: nbuf mapping function name
2122  * @map_line_num: mapping function line number
2123  * @unmap_func_name: nbuf unmapping function name
2124  * @unmap_line_num: mapping function line number
2125  * @is_nbuf_mapped: indicate mapped/unmapped nbuf
2126  */
2127 struct qdf_nbuf_track_t {
2128 	struct qdf_nbuf_track_t *p_next;
2129 	qdf_nbuf_t net_buf;
2130 	char func_name[QDF_MEM_FUNC_NAME_SIZE];
2131 	uint32_t line_num;
2132 	size_t size;
2133 	char map_func_name[QDF_MEM_FUNC_NAME_SIZE];
2134 	uint32_t map_line_num;
2135 	char unmap_func_name[QDF_MEM_FUNC_NAME_SIZE];
2136 	uint32_t unmap_line_num;
2137 	bool is_nbuf_mapped;
2138 };
2139 
2140 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2141 typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
2142 
2143 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2144 static struct kmem_cache *nbuf_tracking_cache;
2145 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2146 static spinlock_t qdf_net_buf_track_free_list_lock;
2147 static uint32_t qdf_net_buf_track_free_list_count;
2148 static uint32_t qdf_net_buf_track_used_list_count;
2149 static uint32_t qdf_net_buf_track_max_used;
2150 static uint32_t qdf_net_buf_track_max_free;
2151 static uint32_t qdf_net_buf_track_max_allocated;
2152 
2153 /**
2154  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2155  *
2156  * tracks the max number of network buffers that the wlan driver was tracking
2157  * at any one time.
2158  *
2159  * Return: none
2160  */
2161 static inline void update_max_used(void)
2162 {
2163 	int sum;
2164 
2165 	if (qdf_net_buf_track_max_used <
2166 	    qdf_net_buf_track_used_list_count)
2167 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2168 	sum = qdf_net_buf_track_free_list_count +
2169 		qdf_net_buf_track_used_list_count;
2170 	if (qdf_net_buf_track_max_allocated < sum)
2171 		qdf_net_buf_track_max_allocated = sum;
2172 }
2173 
2174 /**
2175  * update_max_free() - update qdf_net_buf_track_max_free
2176  *
2177  * tracks the max number of tracking buffers kept in the freelist.
2178  *
2179  * Return: none
2180  */
2181 static inline void update_max_free(void)
2182 {
2183 	if (qdf_net_buf_track_max_free <
2184 	    qdf_net_buf_track_free_list_count)
2185 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2186 }
2187 
2188 /**
2189  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2190  *
2191  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2192  * This function also adds flexibility to adjust the allocation and freelist
2193  * schemes.
2194  *
2195  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2196  */
2197 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2198 {
2199 	int flags = GFP_KERNEL;
2200 	unsigned long irq_flag;
2201 	QDF_NBUF_TRACK *new_node = NULL;
2202 
2203 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2204 	qdf_net_buf_track_used_list_count++;
2205 	if (qdf_net_buf_track_free_list) {
2206 		new_node = qdf_net_buf_track_free_list;
2207 		qdf_net_buf_track_free_list =
2208 			qdf_net_buf_track_free_list->p_next;
2209 		qdf_net_buf_track_free_list_count--;
2210 	}
2211 	update_max_used();
2212 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2213 
2214 	if (new_node)
2215 		return new_node;
2216 
2217 	if (in_interrupt() || irqs_disabled() || in_atomic())
2218 		flags = GFP_ATOMIC;
2219 
2220 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2221 }
2222 
2223 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2224 #define FREEQ_POOLSIZE 2048
2225 
2226 /**
2227  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2228  *
2229  * Matches calls to qdf_nbuf_track_alloc.
2230  * Either frees the tracking cookie to kernel or an internal
2231  * freelist based on the size of the freelist.
2232  *
2233  * Return: none
2234  */
2235 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2236 {
2237 	unsigned long irq_flag;
2238 
2239 	if (!node)
2240 		return;
2241 
2242 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
2243 	 * only shrink the freelist if it is bigger than twice the number of
2244 	 * nbufs in use. If the driver is stalling in a consistent bursty
2245 	 * fashion, this will keep 3/4 of the allocations from the free list
2246 	 * while also allowing the system to recover memory as less frantic
2247 	 * traffic occurs.
2248 	 */
2249 
2250 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2251 
2252 	qdf_net_buf_track_used_list_count--;
2253 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2254 	   (qdf_net_buf_track_free_list_count >
2255 	    qdf_net_buf_track_used_list_count << 1)) {
2256 		kmem_cache_free(nbuf_tracking_cache, node);
2257 	} else {
2258 		node->p_next = qdf_net_buf_track_free_list;
2259 		qdf_net_buf_track_free_list = node;
2260 		qdf_net_buf_track_free_list_count++;
2261 	}
2262 	update_max_free();
2263 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2264 }
2265 
2266 /**
2267  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2268  *
2269  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2270  * the freelist first makes it performant for the first iperf udp burst
2271  * as well as steady state.
2272  *
2273  * Return: None
2274  */
2275 static void qdf_nbuf_track_prefill(void)
2276 {
2277 	int i;
2278 	QDF_NBUF_TRACK *node, *head;
2279 
2280 	/* prepopulate the freelist */
2281 	head = NULL;
2282 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2283 		node = qdf_nbuf_track_alloc();
2284 		if (!node)
2285 			continue;
2286 		node->p_next = head;
2287 		head = node;
2288 	}
2289 	while (head) {
2290 		node = head->p_next;
2291 		qdf_nbuf_track_free(head);
2292 		head = node;
2293 	}
2294 
2295 	/* prefilled buffers should not count as used */
2296 	qdf_net_buf_track_max_used = 0;
2297 }
2298 
2299 /**
2300  * qdf_nbuf_track_memory_manager_create() - create the memory manager for nbuf tracking cookies
2301  *
2302  * This initializes the memory manager for the nbuf tracking cookies.  Because
2303  * these cookies are all the same size and only used in this feature, we can
2304  * use a kmem_cache to provide tracking as well as to speed up allocations.
2305  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2306  * features) a freelist is prepopulated here.
2307  *
2308  * Return: None
2309  */
2310 static void qdf_nbuf_track_memory_manager_create(void)
2311 {
2312 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2313 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2314 						sizeof(QDF_NBUF_TRACK),
2315 						0, 0, NULL);
2316 
2317 	qdf_nbuf_track_prefill();
2318 }
2319 
2320 /**
2321  * qdf_nbuf_track_memory_manager_destroy() - destroy the memory manager for nbuf tracking cookies
2322  *
2323  * Empty the freelist and print out usage statistics when it is no longer
2324  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2325  * any nbuf tracking cookies were leaked.
2326  *
2327  * Return: None
2328  */
2329 static void qdf_nbuf_track_memory_manager_destroy(void)
2330 {
2331 	QDF_NBUF_TRACK *node, *tmp;
2332 	unsigned long irq_flag;
2333 
2334 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2335 	node = qdf_net_buf_track_free_list;
2336 
2337 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2338 		qdf_print("%s: unexpectedly large max_used count %d",
2339 			  __func__, qdf_net_buf_track_max_used);
2340 
2341 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2342 		qdf_print("%s: %d unused trackers were allocated",
2343 			  __func__,
2344 			  qdf_net_buf_track_max_allocated -
2345 			  qdf_net_buf_track_max_used);
2346 
2347 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2348 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2349 		qdf_print("%s: check freelist shrinking functionality",
2350 			  __func__);
2351 
2352 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2353 		  "%s: %d residual freelist size",
2354 		  __func__, qdf_net_buf_track_free_list_count);
2355 
2356 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2357 		  "%s: %d max freelist size observed",
2358 		  __func__, qdf_net_buf_track_max_free);
2359 
2360 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2361 		  "%s: %d max buffers used observed",
2362 		  __func__, qdf_net_buf_track_max_used);
2363 
2364 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2365 		  "%s: %d max buffers allocated observed",
2366 		  __func__, qdf_net_buf_track_max_allocated);
2367 
2368 	while (node) {
2369 		tmp = node;
2370 		node = node->p_next;
2371 		kmem_cache_free(nbuf_tracking_cache, tmp);
2372 		qdf_net_buf_track_free_list_count--;
2373 	}
2374 
2375 	if (qdf_net_buf_track_free_list_count != 0)
2376 		qdf_info("%d unfreed tracking memory lost in freelist",
2377 			 qdf_net_buf_track_free_list_count);
2378 
2379 	if (qdf_net_buf_track_used_list_count != 0)
2380 		qdf_info("%d unfreed tracking memory still in use",
2381 			 qdf_net_buf_track_used_list_count);
2382 
2383 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2384 	kmem_cache_destroy(nbuf_tracking_cache);
2385 	qdf_net_buf_track_free_list = NULL;
2386 }
2387 
2388 /**
2389  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2390  *
2391  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2392  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2393  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2394  * supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
2395  * reported as a memory leak.
2396  *
2397  * Return: none
2398  */
2399 void qdf_net_buf_debug_init(void)
2400 {
2401 	uint32_t i;
2402 
2403 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2404 
2405 	if (is_initial_mem_debug_disabled)
2406 		return;
2407 
2408 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2409 
2410 	qdf_nbuf_map_tracking_init();
2411 	qdf_nbuf_track_memory_manager_create();
2412 
2413 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2414 		gp_qdf_net_buf_track_tbl[i] = NULL;
2415 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2416 	}
2417 }
2418 qdf_export_symbol(qdf_net_buf_debug_init);
2419 
2420 /**
2421  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2422  *
2423  * Exit network buffer tracking debug functionality and log SKB memory leaks
2424  * As part of exiting the functionality, free the leaked memory and
2425  * cleanup the tracking buffers.
2426  *
2427  * Return: none
2428  */
2429 void qdf_net_buf_debug_exit(void)
2430 {
2431 	uint32_t i;
2432 	uint32_t count = 0;
2433 	unsigned long irq_flag;
2434 	QDF_NBUF_TRACK *p_node;
2435 	QDF_NBUF_TRACK *p_prev;
2436 
2437 	if (is_initial_mem_debug_disabled)
2438 		return;
2439 
2440 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2441 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2442 		p_node = gp_qdf_net_buf_track_tbl[i];
2443 		while (p_node) {
2444 			p_prev = p_node;
2445 			p_node = p_node->p_next;
2446 			count++;
2447 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2448 				 p_prev->func_name, p_prev->line_num,
2449 				 p_prev->size, p_prev->net_buf);
2450 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2451 				 p_prev->map_func_name,
2452 				 p_prev->map_line_num,
2453 				 p_prev->unmap_func_name,
2454 				 p_prev->unmap_line_num,
2455 				 p_prev->is_nbuf_mapped);
2456 			qdf_nbuf_track_free(p_prev);
2457 		}
2458 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2459 	}
2460 
2461 	qdf_nbuf_track_memory_manager_destroy();
2462 	qdf_nbuf_map_tracking_deinit();
2463 
2464 #ifdef CONFIG_HALT_KMEMLEAK
2465 	if (count) {
2466 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2467 		QDF_BUG(0);
2468 	}
2469 #endif
2470 }
2471 qdf_export_symbol(qdf_net_buf_debug_exit);
2472 
2473 /**
2474  * qdf_net_buf_debug_hash() - hash network buffer pointer
2475  *
2476  * Return: hash value
2477  */
2478 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2479 {
2480 	uint32_t i;
2481 
2482 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2483 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2484 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2485 
2486 	return i;
2487 }
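
/*
 * Worked example (hypothetical pointer value): for net_buf =
 * 0xffff888012345678 and QDF_NET_BUF_TRACK_MAX_SIZE = 1024, the low 16
 * bits of (ptr >> 4) are 0x4567 and of (ptr >> 14) are 0x48d1; their sum
 * 0x8e38 masked with 0x3ff selects bucket 0x238.
 */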
2488 
2489 /**
2490  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2491  *
2492  * Return: If skb is found in hash table then return pointer to network buffer
2493  *	else return %NULL
2494  */
2495 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2496 {
2497 	uint32_t i;
2498 	QDF_NBUF_TRACK *p_node;
2499 
2500 	i = qdf_net_buf_debug_hash(net_buf);
2501 	p_node = gp_qdf_net_buf_track_tbl[i];
2502 
2503 	while (p_node) {
2504 		if (p_node->net_buf == net_buf)
2505 			return p_node;
2506 		p_node = p_node->p_next;
2507 	}
2508 
2509 	return NULL;
2510 }
2511 
2512 /**
2513  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2514  *
2515  * Return: none
2516  */
2517 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2518 				const char *func_name, uint32_t line_num)
2519 {
2520 	uint32_t i;
2521 	unsigned long irq_flag;
2522 	QDF_NBUF_TRACK *p_node;
2523 	QDF_NBUF_TRACK *new_node;
2524 
2525 	if (is_initial_mem_debug_disabled)
2526 		return;
2527 
2528 	new_node = qdf_nbuf_track_alloc();
2529 
2530 	i = qdf_net_buf_debug_hash(net_buf);
2531 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2532 
2533 	p_node = qdf_net_buf_debug_look_up(net_buf);
2534 
2535 	if (p_node) {
2536 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2537 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2538 			  net_buf, func_name, line_num);
2539 		qdf_nbuf_track_free(new_node);
2540 	} else {
2541 		p_node = new_node;
2542 		if (p_node) {
2543 			p_node->net_buf = net_buf;
2544 			qdf_str_lcopy(p_node->func_name, func_name,
2545 				      QDF_MEM_FUNC_NAME_SIZE);
2546 			p_node->line_num = line_num;
2547 			p_node->is_nbuf_mapped = false;
2548 			p_node->map_line_num = 0;
2549 			p_node->unmap_line_num = 0;
2550 			p_node->map_func_name[0] = '\0';
2551 			p_node->unmap_func_name[0] = '\0';
2552 			p_node->size = size;
2553 			qdf_mem_skb_inc(size);
2554 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2555 			gp_qdf_net_buf_track_tbl[i] = p_node;
2556 		} else
2557 			qdf_print(
2558 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2559 				  func_name, line_num, size);
2560 	}
2561 
2562 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2563 }
2564 qdf_export_symbol(qdf_net_buf_debug_add_node);
2565 
2566 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2567 				   uint32_t line_num)
2568 {
2569 	uint32_t i;
2570 	unsigned long irq_flag;
2571 	QDF_NBUF_TRACK *p_node;
2572 
2573 	if (is_initial_mem_debug_disabled)
2574 		return;
2575 
2576 	i = qdf_net_buf_debug_hash(net_buf);
2577 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2578 
2579 	p_node = qdf_net_buf_debug_look_up(net_buf);
2580 
2581 	if (p_node) {
2582 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2583 			      QDF_MEM_FUNC_NAME_SIZE);
2584 		p_node->line_num = line_num;
2585 	}
2586 
2587 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2588 }
2589 
2590 qdf_export_symbol(qdf_net_buf_debug_update_node);
2591 
2592 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2593 				       const char *func_name,
2594 				       uint32_t line_num)
2595 {
2596 	uint32_t i;
2597 	unsigned long irq_flag;
2598 	QDF_NBUF_TRACK *p_node;
2599 
2600 	if (is_initial_mem_debug_disabled)
2601 		return;
2602 
2603 	i = qdf_net_buf_debug_hash(net_buf);
2604 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2605 
2606 	p_node = qdf_net_buf_debug_look_up(net_buf);
2607 
2608 	if (p_node) {
2609 		qdf_str_lcopy(p_node->map_func_name, func_name,
2610 			      QDF_MEM_FUNC_NAME_SIZE);
2611 		p_node->map_line_num = line_num;
2612 		p_node->is_nbuf_mapped = true;
2613 	}
2614 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2615 }
2616 
2617 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
2618 					 const char *func_name,
2619 					 uint32_t line_num)
2620 {
2621 	uint32_t i;
2622 	unsigned long irq_flag;
2623 	QDF_NBUF_TRACK *p_node;
2624 
2625 	if (is_initial_mem_debug_disabled)
2626 		return;
2627 
2628 	i = qdf_net_buf_debug_hash(net_buf);
2629 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2630 
2631 	p_node = qdf_net_buf_debug_look_up(net_buf);
2632 
2633 	if (p_node) {
2634 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
2635 			      QDF_MEM_FUNC_NAME_SIZE);
2636 		p_node->unmap_line_num = line_num;
2637 		p_node->is_nbuf_mapped = false;
2638 	}
2639 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2640 }
2641 
2642 /**
2643  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2644  *
2645  * Return: none
2646  */
2647 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2648 {
2649 	uint32_t i;
2650 	QDF_NBUF_TRACK *p_head;
2651 	QDF_NBUF_TRACK *p_node = NULL;
2652 	unsigned long irq_flag;
2653 	QDF_NBUF_TRACK *p_prev;
2654 
2655 	if (is_initial_mem_debug_disabled)
2656 		return;
2657 
2658 	i = qdf_net_buf_debug_hash(net_buf);
2659 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2660 
2661 	p_head = gp_qdf_net_buf_track_tbl[i];
2662 
2663 	/* Unallocated SKB */
2664 	if (!p_head)
2665 		goto done;
2666 
2667 	p_node = p_head;
2668 	/* Found at head of the table */
2669 	if (p_head->net_buf == net_buf) {
2670 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2671 		goto done;
2672 	}
2673 
2674 	/* Search in collision list */
2675 	while (p_node) {
2676 		p_prev = p_node;
2677 		p_node = p_node->p_next;
2678 		if ((p_node) && (p_node->net_buf == net_buf)) {
2679 			p_prev->p_next = p_node->p_next;
2680 			break;
2681 		}
2682 	}
2683 
2684 done:
2685 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2686 
2687 	if (p_node) {
2688 		qdf_mem_skb_dec(p_node->size);
2689 		qdf_nbuf_track_free(p_node);
2690 	} else {
2691 		QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
2692 				   net_buf);
2693 	}
2694 }
2695 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2696 
2697 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2698 				   const char *func_name, uint32_t line_num)
2699 {
2700 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2701 
2702 	if (is_initial_mem_debug_disabled)
2703 		return;
2704 
2705 	while (ext_list) {
2706 		/*
2707 		 * Take care to add a node for each buffer if it is a jumbo
2708 		 * packet chained using frag_list
2709 		 */
2710 		qdf_nbuf_t next;
2711 
2712 		next = qdf_nbuf_queue_next(ext_list);
2713 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2714 		ext_list = next;
2715 	}
2716 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2717 }
2718 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2719 
2720 /**
2721  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2722  * @net_buf: Network buf holding head segment (single)
2723  *
2724  * WLAN driver modules whose allocated SKBs are freed by the network stack are
2725  * supposed to call this API before returning the SKB to the network stack so
2726  * that the SKB is not reported as a memory leak.
2727  *
2728  * Return: none
2729  */
2730 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2731 {
2732 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2733 
2734 	if (is_initial_mem_debug_disabled)
2735 		return;
2736 
2737 	while (ext_list) {
2738 		 * Take care to free the node for each buffer if it is a jumbo
2739 		 * packet chained using frag_list
2740 		 * frag_list
2741 		 */
2742 		qdf_nbuf_t next;
2743 
2744 		next = qdf_nbuf_queue_next(ext_list);
2745 
2746 		if (qdf_nbuf_get_users(ext_list) > 1) {
2747 			ext_list = next;
2748 			continue;
2749 		}
2750 
2751 		qdf_net_buf_debug_delete_node(ext_list);
2752 		ext_list = next;
2753 	}
2754 
2755 	if (qdf_nbuf_get_users(net_buf) > 1)
2756 		return;
2757 
2758 	qdf_net_buf_debug_delete_node(net_buf);
2759 }
2760 qdf_export_symbol(qdf_net_buf_debug_release_skb);
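
/*
 * Illustrative usage sketch (hypothetical rx delivery path, not part of
 * this file): a driver that hands an SKB it allocated up to the network
 * stack is expected to drop the tracking node first, otherwise the buffer
 * is reported as a leak at unload time:
 *
 *	static void example_deliver_to_stack(qdf_nbuf_t nbuf)
 *	{
 *		qdf_net_buf_debug_release_skb(nbuf);
 *		netif_rx(nbuf);
 *	}
 */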
2761 
2762 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2763 				int reserve, int align, int prio,
2764 				const char *func, uint32_t line)
2765 {
2766 	qdf_nbuf_t nbuf;
2767 
2768 	if (is_initial_mem_debug_disabled)
2769 		return __qdf_nbuf_alloc(osdev, size,
2770 					reserve, align,
2771 					prio, func, line);
2772 
2773 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2774 
2775 	/* Store SKB in internal QDF tracking table */
2776 	if (qdf_likely(nbuf)) {
2777 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2778 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2779 	} else {
2780 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2781 	}
2782 
2783 	return nbuf;
2784 }
2785 qdf_export_symbol(qdf_nbuf_alloc_debug);
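
/*
 * Callers normally do not invoke qdf_nbuf_alloc_debug() directly; the
 * qdf_nbuf_alloc() wrapper in qdf_nbuf.h is expected to pass __func__ and
 * __LINE__ so that leaks can be attributed to the call site. A minimal
 * hypothetical calling pattern (parameter values are illustrative only):
 *
 *	qdf_nbuf_t nbuf = qdf_nbuf_alloc(osdev, 2048, 0, 4, 0);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_nbuf_free(nbuf);
 */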
2786 
2787 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
2788 					    const char *func, uint32_t line)
2789 {
2790 	qdf_nbuf_t nbuf;
2791 
2792 	if (is_initial_mem_debug_disabled)
2793 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
2794 						    line);
2795 
2796 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
2797 
2798 	/* Store SKB in internal QDF tracking table */
2799 	if (qdf_likely(nbuf)) {
2800 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2801 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2802 	} else {
2803 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2804 	}
2805 
2806 	return nbuf;
2807 }
2808 
2809 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
2810 
2811 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2812 {
2813 	qdf_nbuf_t ext_list;
2814 
2815 	if (qdf_unlikely(!nbuf))
2816 		return;
2817 
2818 	if (is_initial_mem_debug_disabled)
2819 		goto free_buf;
2820 
2821 	if (qdf_nbuf_get_users(nbuf) > 1)
2822 		goto free_buf;
2823 
2824 	/* Remove SKB from internal QDF tracking table */
2825 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2826 	qdf_net_buf_debug_delete_node(nbuf);
2827 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2828 
2829 	/* Take care to delete the debug entries for frag_list */
2830 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2831 	while (ext_list) {
2832 		if (qdf_nbuf_get_users(ext_list) == 1) {
2833 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2834 			qdf_net_buf_debug_delete_node(ext_list);
2835 		}
2836 
2837 		ext_list = qdf_nbuf_queue_next(ext_list);
2838 	}
2839 
2840 free_buf:
2841 	__qdf_nbuf_free(nbuf);
2842 }
2843 qdf_export_symbol(qdf_nbuf_free_debug);
2844 
2845 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2846 {
2847 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2848 
2849 	if (is_initial_mem_debug_disabled)
2850 		return cloned_buf;
2851 
2852 	if (qdf_unlikely(!cloned_buf))
2853 		return NULL;
2854 
2855 	/* Store SKB in internal QDF tracking table */
2856 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
2857 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
2858 
2859 	return cloned_buf;
2860 }
2861 qdf_export_symbol(qdf_nbuf_clone_debug);
2862 
2863 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2864 {
2865 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
2866 
2867 	if (is_initial_mem_debug_disabled)
2868 		return copied_buf;
2869 
2870 	if (qdf_unlikely(!copied_buf))
2871 		return NULL;
2872 
2873 	/* Store SKB in internal QDF tracking table */
2874 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2875 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
2876 
2877 	return copied_buf;
2878 }
2879 qdf_export_symbol(qdf_nbuf_copy_debug);
2880 
2881 qdf_nbuf_t
2882 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
2883 			   const char *func, uint32_t line)
2884 {
2885 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
2886 
2887 	if (qdf_unlikely(!copied_buf))
2888 		return NULL;
2889 
2890 	if (is_initial_mem_debug_disabled)
2891 		return copied_buf;
2892 
2893 	/* Store SKB in internal QDF tracking table */
2894 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
2895 	qdf_nbuf_history_add(copied_buf, func, line,
2896 			     QDF_NBUF_ALLOC_COPY_EXPAND);
2897 
2898 	return copied_buf;
2899 }
2900 
2901 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
2902 
2903 #endif /* NBUF_MEMORY_DEBUG */
2904 
2905 #if defined(FEATURE_TSO)
2906 
2907 /**
2908  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
2909  *
2910  * @ethproto: ethernet type of the msdu
2911  * @ip_tcp_hdr_len: ip + tcp length for the msdu
2912  * @l2_len: L2 length for the msdu
2913  * @eit_hdr: pointer to EIT header
2914  * @eit_hdr_len: EIT header length for the msdu
2915  * @eit_hdr_dma_map_addr: dma addr for EIT header
2916  * @tcphdr: pointer to tcp header
2917  * @ipv4_csum_en: ipv4 checksum enable
2918  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
2919  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
2920  * @ip_id: IP id
2921  * @tcp_seq_num: TCP sequence number
2922  *
2923  * This structure holds the TSO common info that is common
2924  * across all the TCP segments of the jumbo packet.
2925  */
2926 struct qdf_tso_cmn_seg_info_t {
2927 	uint16_t ethproto;
2928 	uint16_t ip_tcp_hdr_len;
2929 	uint16_t l2_len;
2930 	uint8_t *eit_hdr;
2931 	uint32_t eit_hdr_len;
2932 	qdf_dma_addr_t eit_hdr_dma_map_addr;
2933 	struct tcphdr *tcphdr;
2934 	uint16_t ipv4_csum_en;
2935 	uint16_t tcp_ipv4_csum_en;
2936 	uint16_t tcp_ipv6_csum_en;
2937 	uint16_t ip_id;
2938 	uint32_t tcp_seq_num;
2939 };
2940 
2941 /**
2942  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
2943  *
2944  * @skb: network buffer
2945  *
2946  * Return: number of bytes the buffer was shifted to make the EIT header end 8-byte aligned (0 if no adjustment was made).
2947  */
2948 #ifdef WAR_TXDMA_LIMITATION
2949 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
2950 {
2951 	uint32_t eit_hdr_len;
2952 	uint8_t *eit_hdr;
2953 	uint8_t byte_8_align_offset;
2954 
2955 	/*
2956 	 * Workaround for TXDMA HW limitation.
2957 	 * ADDR0&0x1FFFFFFF8 should not equal ADDR1&0x1FFFFFFF8.
2958 	 * Otherwise, TXDMA will run into an exception, which causes TX to fail.
2959 	 * ADDR0: the address of last words in previous buffer;
2960 	 * ADDR1: the address of first words in next buffer;
2961 	 * To avoid this, shift several bytes for ADDR0.
2962 	 */
2963 	eit_hdr = skb->data;
2964 	eit_hdr_len = (skb_transport_header(skb)
2965 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
2966 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
2967 	if (qdf_unlikely(byte_8_align_offset)) {
2968 		TSO_DEBUG("%pK,Len %d %d",
2969 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
2970 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
2971 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
2972 				  __LINE__, skb->head, skb->data,
2973 				 byte_8_align_offset);
2974 			return 0;
2975 		}
2976 		qdf_nbuf_push_head(skb, byte_8_align_offset);
2977 		qdf_mem_move(skb->data,
2978 			     skb->data + byte_8_align_offset,
2979 			     eit_hdr_len);
2980 		skb->len -= byte_8_align_offset;
2981 		skb->mac_header -= byte_8_align_offset;
2982 		skb->network_header -= byte_8_align_offset;
2983 		skb->transport_header -= byte_8_align_offset;
2984 	}
2985 	return byte_8_align_offset;
2986 }
2987 #else
2988 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
2989 {
2990 	return 0;
2991 }
2992 #endif
2993 
2994 /**
2995  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
2996  * information
2997  * @osdev: qdf device handle
2998  * @skb: skb buffer
2999  * @tso_info: Parameters common to all segments
3000  *
3001  * Get the TSO information that is common across all the TCP
3002  * segments of the jumbo packet
3003  *
3004  * Return: 0 - success 1 - failure
3005  */
3006 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3007 			struct sk_buff *skb,
3008 			struct qdf_tso_cmn_seg_info_t *tso_info)
3009 {
3010 	/* Get ethernet type and ethernet header length */
3011 	tso_info->ethproto = vlan_get_protocol(skb);
3012 
3013 	/* Determine whether this is an IPv4 or IPv6 packet */
3014 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3015 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3016 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3017 
3018 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3019 		tso_info->ipv4_csum_en = 1;
3020 		tso_info->tcp_ipv4_csum_en = 1;
3021 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3022 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3023 				ipv4_hdr->protocol);
3024 			return 1;
3025 		}
3026 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3027 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3028 		tso_info->tcp_ipv6_csum_en = 1;
3029 	} else {
3030 		qdf_err("TSO: ethertype 0x%x is not supported!",
3031 			tso_info->ethproto);
3032 		return 1;
3033 	}
3034 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3035 	tso_info->tcphdr = tcp_hdr(skb);
3036 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3037 	/* get pointer to the ethernet + IP + TCP header and their length */
3038 	tso_info->eit_hdr = skb->data;
3039 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3040 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3041 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
3042 							tso_info->eit_hdr,
3043 							tso_info->eit_hdr_len,
3044 							DMA_TO_DEVICE);
3045 	if (unlikely(dma_mapping_error(osdev->dev,
3046 				       tso_info->eit_hdr_dma_map_addr))) {
3047 		qdf_err("DMA mapping error!");
3048 		qdf_assert(0);
3049 		return 1;
3050 	}
3051 
3052 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3053 		/* include IPv4 header length for IPV4 (total length) */
3054 		tso_info->ip_tcp_hdr_len =
3055 			tso_info->eit_hdr_len - tso_info->l2_len;
3056 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3057 		/* exclude IPv6 header length for IPv6 (payload length) */
3058 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3059 	}
3060 	/*
3061 	 * The length of the payload (application layer data) is added to
3062 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3063 	 * descriptor.
3064 	 */
3065 
3066 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3067 		tso_info->tcp_seq_num,
3068 		tso_info->eit_hdr_len,
3069 		tso_info->l2_len,
3070 		skb->len);
3071 	return 0;
3072 }
3073 
3074 
3075 /**
3076  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3077  *
3078  * @curr_seg: Segment whose contents are initialized
3079  * @tso_cmn_info: Parameters common to all segments
3080  *
3081  * Return: None
3082  */
3083 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3084 				struct qdf_tso_seg_elem_t *curr_seg,
3085 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3086 {
3087 	/* Initialize the flags to 0 */
3088 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3089 
3090 	/*
3091 	 * The following fields remain the same across all segments of
3092 	 * a jumbo packet
3093 	 */
3094 	curr_seg->seg.tso_flags.tso_enable = 1;
3095 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3096 		tso_cmn_info->ipv4_csum_en;
3097 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3098 		tso_cmn_info->tcp_ipv6_csum_en;
3099 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3100 		tso_cmn_info->tcp_ipv4_csum_en;
3101 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3102 
3103 	/* The following fields change for the segments */
3104 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3105 	tso_cmn_info->ip_id++;
3106 
3107 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3108 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3109 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3110 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3111 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3112 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3113 
3114 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3115 
3116 	/*
3117 	 * First fragment for each segment always contains the ethernet,
3118 	 * IP and TCP header
3119 	 */
3120 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3121 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3122 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3123 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3124 
3125 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3126 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3127 		   tso_cmn_info->eit_hdr_len,
3128 		   curr_seg->seg.tso_flags.tcp_seq_num,
3129 		   curr_seg->seg.total_len);
3130 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3131 }
3132 
3133 /**
3134  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3135  * into segments
3136  * @osdev: qdf device handle
 * @skb: network buffer to be segmented
3137  * @tso_info: This is the output. The information about the
3138  *           TSO segments will be populated within this.
3139  *
3140  * This function fragments a TCP jumbo packet into smaller
3141  * segments to be transmitted by the driver. It chains the TSO
3142  * segments created into a list.
3143  *
3144  * Return: number of TSO segments
3145  */
3146 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3147 		struct qdf_tso_info_t *tso_info)
3148 {
3149 	/* common across all segments */
3150 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3151 	/* segment specific */
3152 	void *tso_frag_vaddr;
3153 	qdf_dma_addr_t tso_frag_paddr = 0;
3154 	uint32_t num_seg = 0;
3155 	struct qdf_tso_seg_elem_t *curr_seg;
3156 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3157 	skb_frag_t *frag = NULL;
3158 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3159 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3160 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3161 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3162 	int j = 0; /* skb fragment index */
3163 	uint8_t byte_8_align_offset;
3164 
3165 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3166 	total_num_seg = tso_info->tso_num_seg_list;
3167 	curr_seg = tso_info->tso_seg_list;
3168 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3169 
3170 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3171 
3172 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3173 						skb, &tso_cmn_info))) {
3174 		qdf_warn("TSO: error getting common segment info");
3175 		return 0;
3176 	}
3177 
3178 	/* length of the first chunk of data in the skb */
3179 	skb_frag_len = skb_headlen(skb);
3180 
3181 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3182 	/* update the remaining skb fragment length and TSO segment length */
3183 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3184 	skb_proc -= tso_cmn_info.eit_hdr_len;
3185 
3186 	/* get the address to the next tso fragment */
3187 	tso_frag_vaddr = skb->data +
3188 			 tso_cmn_info.eit_hdr_len +
3189 			 byte_8_align_offset;
3190 	/* get the length of the next tso fragment */
3191 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3192 
3193 	if (tso_frag_len != 0) {
3194 		tso_frag_paddr = dma_map_single(osdev->dev,
3195 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
3196 	}
3197 
3198 	if (unlikely(dma_mapping_error(osdev->dev,
3199 					tso_frag_paddr))) {
3200 		qdf_err("DMA mapping error!");
3201 		qdf_assert(0);
3202 		return 0;
3203 	}
3204 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3205 		__LINE__, skb_frag_len, tso_frag_len);
3206 	num_seg = tso_info->num_segs;
3207 	tso_info->num_segs = 0;
3208 	tso_info->is_tso = 1;
3209 
3210 	while (num_seg && curr_seg) {
3211 		int i = 1; /* tso fragment index */
3212 		uint8_t more_tso_frags = 1;
3213 
3214 		curr_seg->seg.num_frags = 0;
3215 		tso_info->num_segs++;
3216 		total_num_seg->num_seg.tso_cmn_num_seg++;
3217 
3218 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3219 						 &tso_cmn_info);
3220 
3221 		/* If TCP PSH flag is set, set it in the last or only segment */
3222 		if (num_seg == 1)
3223 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3224 
3225 		if (unlikely(skb_proc == 0))
3226 			return tso_info->num_segs;
3227 
3228 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3229 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3230 		/* frag len is added to ip_len in while loop below*/
3231 
3232 		curr_seg->seg.num_frags++;
3233 
3234 		while (more_tso_frags) {
3235 			if (tso_frag_len != 0) {
3236 				curr_seg->seg.tso_frags[i].vaddr =
3237 					tso_frag_vaddr;
3238 				curr_seg->seg.tso_frags[i].length =
3239 					tso_frag_len;
3240 				curr_seg->seg.total_len += tso_frag_len;
3241 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3242 				curr_seg->seg.num_frags++;
3243 				skb_proc = skb_proc - tso_frag_len;
3244 
3245 				/* increment the TCP sequence number */
3246 
3247 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3248 				curr_seg->seg.tso_frags[i].paddr =
3249 					tso_frag_paddr;
3250 			}
3251 
3252 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3253 					__func__, __LINE__,
3254 					i,
3255 					tso_frag_len,
3256 					curr_seg->seg.total_len,
3257 					curr_seg->seg.tso_frags[i].vaddr);
3258 
3259 			/* if there is no more data left in the skb */
3260 			if (!skb_proc)
3261 				return tso_info->num_segs;
3262 
3263 			/* get the next payload fragment information */
3264 			/* check if there are more fragments in this segment */
3265 			if (tso_frag_len < tso_seg_size) {
3266 				more_tso_frags = 1;
3267 				if (tso_frag_len != 0) {
3268 					tso_seg_size = tso_seg_size -
3269 						tso_frag_len;
3270 					i++;
3271 					if (curr_seg->seg.num_frags ==
3272 								FRAG_NUM_MAX) {
3273 						more_tso_frags = 0;
3274 						/*
3275 						 * reset i and the tso
3276 						 * payload size
3277 						 */
3278 						i = 1;
3279 						tso_seg_size =
3280 							skb_shinfo(skb)->
3281 								gso_size;
3282 					}
3283 				}
3284 			} else {
3285 				more_tso_frags = 0;
3286 				/* reset i and the tso payload size */
3287 				i = 1;
3288 				tso_seg_size = skb_shinfo(skb)->gso_size;
3289 			}
3290 
3291 			/* if the next fragment is contiguous */
3292 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3293 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3294 				skb_frag_len = skb_frag_len - tso_frag_len;
3295 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3296 
3297 			} else { /* the next fragment is not contiguous */
3298 				if (skb_shinfo(skb)->nr_frags == 0) {
3299 					qdf_info("TSO: nr_frags == 0!");
3300 					qdf_assert(0);
3301 					return 0;
3302 				}
3303 				if (j >= skb_shinfo(skb)->nr_frags) {
3304 					qdf_info("TSO: nr_frags %d j %d",
3305 						 skb_shinfo(skb)->nr_frags, j);
3306 					qdf_assert(0);
3307 					return 0;
3308 				}
3309 				frag = &skb_shinfo(skb)->frags[j];
3310 				skb_frag_len = skb_frag_size(frag);
3311 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3312 				tso_frag_vaddr = skb_frag_address_safe(frag);
3313 				j++;
3314 			}
3315 
3316 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3317 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3318 				tso_seg_size);
3319 
3320 			if (!(tso_frag_vaddr)) {
3321 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3322 						__func__);
3323 				return 0;
3324 			}
3325 
3326 			tso_frag_paddr =
3327 					 dma_map_single(osdev->dev,
3328 						 tso_frag_vaddr,
3329 						 tso_frag_len,
3330 						 DMA_TO_DEVICE);
3331 			if (unlikely(dma_mapping_error(osdev->dev,
3332 							tso_frag_paddr))) {
3333 				qdf_err("DMA mapping error!");
3334 				qdf_assert(0);
3335 				return 0;
3336 			}
3337 		}
3338 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3339 				curr_seg->seg.tso_flags.tcp_seq_num);
3340 		num_seg--;
3341 		/* if TCP FIN flag was set, set it in the last segment */
3342 		if (!num_seg)
3343 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3344 
3345 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3346 		curr_seg = curr_seg->next;
3347 	}
3348 	return tso_info->num_segs;
3349 }
3350 qdf_export_symbol(__qdf_nbuf_get_tso_info);
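
/*
 * Illustrative calling sequence (simplified sketch of how a tx path might
 * drive the TSO helpers; segment element allocation is owned by the
 * datapath and is only hinted at here):
 *
 *	num = __qdf_nbuf_get_tso_num_seg(skb);
 *	... allocate 'num' struct qdf_tso_seg_elem_t entries, chain them on
 *	    tso_info.tso_seg_list and set tso_info.num_segs = num ...
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *		return error;
 *	for (curr_seg = tso_info.tso_seg_list; curr_seg;
 *	     curr_seg = curr_seg->next)
 *		... program one HW tx descriptor per TSO segment ...
 */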
3351 
3352 /**
3353  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3354  *
3355  * @osdev: qdf device handle
3356  * @tso_seg: TSO segment element to be unmapped
3357  * @is_last_seg: whether this is last tso seg or not
3358  *
3359  * Return: none
3360  */
3361 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3362 			  struct qdf_tso_seg_elem_t *tso_seg,
3363 			  bool is_last_seg)
3364 {
3365 	uint32_t num_frags = 0;
3366 
3367 	if (tso_seg->seg.num_frags > 0)
3368 		num_frags = tso_seg->seg.num_frags - 1;
3369 
3370 	/*Num of frags in a tso seg cannot be less than 2 */
3371 	if (num_frags < 1) {
3372 		/*
3373 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3374 		 * this may happen when qdf_nbuf_get_tso_info failed,
3375 		 * do dma unmap for the 0th frag in this seg.
3376 		 */
3377 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3378 			goto last_seg_free_first_frag;
3379 
3380 		qdf_assert(0);
3381 		qdf_err("ERROR: num of frags in a tso segment is %d",
3382 			(num_frags + 1));
3383 		return;
3384 	}
3385 
3386 	while (num_frags) {
3387 		/*Do dma unmap the tso seg except the 0th frag */
3388 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3389 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3390 				num_frags);
3391 			qdf_assert(0);
3392 			return;
3393 		}
3394 		dma_unmap_single(osdev->dev,
3395 				 tso_seg->seg.tso_frags[num_frags].paddr,
3396 				 tso_seg->seg.tso_frags[num_frags].length,
3397 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3398 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3399 		num_frags--;
3400 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3401 	}
3402 
3403 last_seg_free_first_frag:
3404 	if (is_last_seg) {
3405 		/*Do dma unmap for the tso seg 0th frag */
3406 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3407 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3408 			qdf_assert(0);
3409 			return;
3410 		}
3411 		dma_unmap_single(osdev->dev,
3412 				 tso_seg->seg.tso_frags[0].paddr,
3413 				 tso_seg->seg.tso_frags[0].length,
3414 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3415 		tso_seg->seg.tso_frags[0].paddr = 0;
3416 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3417 	}
3418 }
3419 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3420 
3421 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3422 {
3423 	size_t packet_len;
3424 
3425 	packet_len = skb->len -
3426 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3427 		 tcp_hdrlen(skb));
3428 
3429 	return packet_len;
3430 }
3431 
3432 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
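
/*
 * Worked example: for a 1514 byte Ethernet frame carrying a 14 byte L2
 * header, a 20 byte IPv4 header and a 20 byte TCP header, the helper
 * above returns 1514 - (14 + 20 + 20) = 1460 bytes of TCP payload.
 */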
3433 
3434 /**
3435  * __qdf_nbuf_get_tso_num_seg() - function to calculate the number
3436  * of TSO segments for a TSO-eligible nbuf
3437  * @skb: network buffer to be segmented
3438  *
3439  * This function calculates how many TCP segments the network buffer
3440  * will be divided into by the TSO logic, without performing the
3441  * actual segmentation.
3442  *
3443  * Return: number of TSO segments (0 on failure)
3446  */
3447 #ifndef BUILD_X86
3448 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3449 {
3450 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3451 	uint32_t remainder, num_segs = 0;
3452 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3453 	uint8_t frags_per_tso = 0;
3454 	uint32_t skb_frag_len = 0;
3455 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3456 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3457 	skb_frag_t *frag = NULL;
3458 	int j = 0;
3459 	uint32_t temp_num_seg = 0;
3460 
3461 	/* length of the first chunk of data in the skb minus eit header*/
3462 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3463 
3464 	/* Calculate num of segs for skb's first chunk of data*/
3465 	remainder = skb_frag_len % tso_seg_size;
3466 	num_segs = skb_frag_len / tso_seg_size;
3467 	/**
3468 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3469 	 * In that case, one more tso seg is required to accommodate
3470 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3471 	 * then remaining data will be accommodated while doing the calculation
3472 	 * for nr_frags data. Hence, frags_per_tso++.
3473 	 */
3474 	if (remainder) {
3475 		if (!skb_nr_frags)
3476 			num_segs++;
3477 		else
3478 			frags_per_tso++;
3479 	}
3480 
3481 	while (skb_nr_frags) {
3482 		if (j >= skb_shinfo(skb)->nr_frags) {
3483 			qdf_info("TSO: nr_frags %d j %d",
3484 				 skb_shinfo(skb)->nr_frags, j);
3485 			qdf_assert(0);
3486 			return 0;
3487 		}
3488 		/**
3489 		 * Calculate the number of tso seg for nr_frags data:
3490 		 * Get the length of each frag in skb_frag_len, add to
3491 		 * remainder. Get the number of segments by dividing it by
3492 		 * tso_seg_size and calculate the new remainder.
3493 		 * Decrement the nr_frags value and keep
3494 		 * looping all the skb_fragments.
3495 		 */
3496 		frag = &skb_shinfo(skb)->frags[j];
3497 		skb_frag_len = skb_frag_size(frag);
3498 		temp_num_seg = num_segs;
3499 		remainder += skb_frag_len;
3500 		num_segs += remainder / tso_seg_size;
3501 		remainder = remainder % tso_seg_size;
3502 		skb_nr_frags--;
3503 		if (remainder) {
3504 			if (num_segs > temp_num_seg)
3505 				frags_per_tso = 0;
3506 			/**
3507 			 * increment the tso per frags whenever remainder is
3508 			 * positive. If frags_per_tso reaches the (max-1),
3509 			 * [First frags always have EIT header, therefore max-1]
3510 			 * increment the num_segs as no more data can be
3511 			 * accommodated in the curr tso seg. Reset the remainder
3512 			 * and frags per tso and keep looping.
3513 			 */
3514 			frags_per_tso++;
3515 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3516 				num_segs++;
3517 				frags_per_tso = 0;
3518 				remainder = 0;
3519 			}
3520 			/**
3521 			 * If this is the last skb frag and still remainder is
3522 			 * non-zero (frags_per_tso has not reached max-1)
3523 			 * then increment the num_segs to take care of the
3524 			 * remaining length.
3525 			 */
3526 			if (!skb_nr_frags && remainder) {
3527 				num_segs++;
3528 				frags_per_tso = 0;
3529 			}
3530 		} else {
3531 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3532 			frags_per_tso = 0;
3533 		}
3534 		j++;
3535 	}
3536 
3537 	return num_segs;
3538 }
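
/*
 * Worked example (hypothetical numbers): with gso_size = 1400, a linear
 * area carrying 3000 payload bytes after the EIT header and no page
 * fragments, the calculation above yields 3000 / 1400 = 2 full segments
 * plus one more for the 200 byte remainder, i.e. 3 TSO segments.
 */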
3539 #elif !defined(QCA_WIFI_QCN9000)
3540 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3541 {
3542 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3543 	skb_frag_t *frag = NULL;
3544 
3545 	/*
3546 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3547 	 * region which cannot be accessed by Target
3548 	 */
3549 	if (virt_to_phys(skb->data) < 0x50000040) {
3550 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3551 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3552 				virt_to_phys(skb->data));
3553 		goto fail;
3554 
3555 	}
3556 
3557 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3558 		frag = &skb_shinfo(skb)->frags[i];
3559 
3560 		if (!frag)
3561 			goto fail;
3562 
3563 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3564 			goto fail;
3565 	}
3566 
3567 
3568 	gso_size = skb_shinfo(skb)->gso_size;
3569 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3570 			+ tcp_hdrlen(skb));
3571 	while (tmp_len) {
3572 		num_segs++;
3573 		if (tmp_len > gso_size)
3574 			tmp_len -= gso_size;
3575 		else
3576 			break;
3577 	}
3578 
3579 	return num_segs;
3580 
3581 	/*
3582 	 * Do not free this frame, just do socket level accounting
3583 	 * so that this is not reused.
3584 	 */
3585 fail:
3586 	if (skb->sk)
3587 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3588 
3589 	return 0;
3590 }
3591 #else
3592 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3593 {
3594 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3595 	skb_frag_t *frag = NULL;
3596 
3597 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3598 		frag = &skb_shinfo(skb)->frags[i];
3599 
3600 		if (!frag)
3601 			goto fail;
3602 	}
3603 
3604 	gso_size = skb_shinfo(skb)->gso_size;
3605 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3606 			+ tcp_hdrlen(skb));
3607 	while (tmp_len) {
3608 		num_segs++;
3609 		if (tmp_len > gso_size)
3610 			tmp_len -= gso_size;
3611 		else
3612 			break;
3613 	}
3614 
3615 	return num_segs;
3616 
3617 	/*
3618 	 * Do not free this frame, just do socket level accounting
3619 	 * so that this is not reused.
3620 	 */
3621 fail:
3622 	if (skb->sk)
3623 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3624 
3625 	return 0;
3626 }
3627 #endif
3628 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3629 
3630 #endif /* FEATURE_TSO */
3631 
3632 /**
3633  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address to be split
 * @lo: pointer to receive the lower 32 bits
 * @hi: pointer to receive the upper 32 bits
3634  *
3635  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3636  *
3637  * Return: N/A
3638  */
3639 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3640 			  uint32_t *lo, uint32_t *hi)
3641 {
3642 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3643 		*lo = lower_32_bits(dmaaddr);
3644 		*hi = upper_32_bits(dmaaddr);
3645 	} else {
3646 		*lo = dmaaddr;
3647 		*hi = 0;
3648 	}
3649 }
3650 
3651 qdf_export_symbol(__qdf_dmaaddr_to_32s);
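
/*
 * Worked example: for a hypothetical 36-bit DMA address 0x123456780,
 * __qdf_dmaaddr_to_32s() stores lo = 0x23456780 and hi = 0x1; on builds
 * where qdf_dma_addr_t is only 32 bits wide, hi is always 0.
 */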
3652 
3653 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3654 {
3655 	qdf_nbuf_users_inc(&skb->users);
3656 	return skb;
3657 }
3658 qdf_export_symbol(__qdf_nbuf_inc_users);
3659 
3660 int __qdf_nbuf_get_users(struct sk_buff *skb)
3661 {
3662 	return qdf_nbuf_users_read(&skb->users);
3663 }
3664 qdf_export_symbol(__qdf_nbuf_get_users);
3665 
3666 /**
3667  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3668  * @skb: sk_buff handle
3669  *
3670  * Return: none
3671  */
3672 
3673 void __qdf_nbuf_ref(struct sk_buff *skb)
3674 {
3675 	skb_get(skb);
3676 }
3677 qdf_export_symbol(__qdf_nbuf_ref);
3678 
3679 /**
3680  * __qdf_nbuf_shared() - Check whether the buffer is shared
3681  *  @skb: sk_buff buffer
3682  *
3683  *  Return: true if more than one person has a reference to this buffer.
3684  */
3685 int __qdf_nbuf_shared(struct sk_buff *skb)
3686 {
3687 	return skb_shared(skb);
3688 }
3689 qdf_export_symbol(__qdf_nbuf_shared);
3690 
3691 /**
3692  * __qdf_nbuf_dmamap_create() - create a DMA map.
3693  * @osdev: qdf device handle
3694  * @dmap: dma map handle
3695  *
3696  * This can later be used to map networking buffers. They:
3697  * - need space in adf_drv's software descriptor
3698  * - are typically created during adf_drv_create
3699  * - need to be created before any API(qdf_nbuf_map) that uses them
3700  *
3701  * Return: QDF_STATUS
3702  */
3703 QDF_STATUS
3704 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3705 {
3706 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3707 	/*
3708 	 * The driver can indicate its SG capability; it must be handled.
3709 	 * Use bounce buffers if they are present.
3710 	 */
3711 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3712 	if (!(*dmap))
3713 		error = QDF_STATUS_E_NOMEM;
3714 
3715 	return error;
3716 }
3717 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3718 /**
3719  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3720  * @osdev: qdf device handle
3721  * @dmap: dma map handle
3722  *
3723  * Return: none
3724  */
3725 void
3726 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3727 {
3728 	kfree(dmap);
3729 }
3730 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
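
/*
 * Typical lifecycle sketch (hypothetical attach/detach path, shown for
 * illustration only):
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	... use dmap while mapping network buffers ...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */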
3731 
3732 /**
3733  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3734  * @osdev: os device
3735  * @skb: skb handle
3736  * @dir: dma direction
3737  * @nbytes: number of bytes to be mapped
3738  *
3739  * Return: QDF_STATUS
3740  */
3741 #ifdef QDF_OS_DEBUG
3742 QDF_STATUS
3743 __qdf_nbuf_map_nbytes(
3744 	qdf_device_t osdev,
3745 	struct sk_buff *skb,
3746 	qdf_dma_dir_t dir,
3747 	int nbytes)
3748 {
3749 	struct skb_shared_info  *sh = skb_shinfo(skb);
3750 
3751 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3752 
3753 	/*
3754 	 * Assume there's only a single fragment.
3755 	 * To support multiple fragments, it would be necessary to change
3756 	 * adf_nbuf_t to be a separate object that stores meta-info
3757 	 * (including the bus address for each fragment) and a pointer
3758 	 * to the underlying sk_buff.
3759 	 */
3760 	qdf_assert(sh->nr_frags == 0);
3761 
3762 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3763 }
3764 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3765 #else
3766 QDF_STATUS
3767 __qdf_nbuf_map_nbytes(
3768 	qdf_device_t osdev,
3769 	struct sk_buff *skb,
3770 	qdf_dma_dir_t dir,
3771 	int nbytes)
3772 {
3773 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3774 }
3775 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3776 #endif
3777 /**
3778  * __qdf_nbuf_unmap_nbytes() - unmap a previously DMA-mapped nbuf
3779  * @osdev: OS device
3780  * @skb: skb handle
3781  * @dir: direction
3782  * @nbytes: number of bytes
3783  *
3784  * Return: none
3785  */
3786 void
3787 __qdf_nbuf_unmap_nbytes(
3788 	qdf_device_t osdev,
3789 	struct sk_buff *skb,
3790 	qdf_dma_dir_t dir,
3791 	int nbytes)
3792 {
3793 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3794 
3795 	/*
3796 	 * Assume there's a single fragment.
3797 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3798 	 */
3799 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
3800 }
3801 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
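
/*
 * Usage sketch (illustrative only): map the linear data of an nbuf before
 * handing it to the DMA engine and unmap it on completion; the mapped bus
 * address is then available through QDF_NBUF_CB_PADDR().
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE,
 *				  skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */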
3802 
3803 /**
3804  * __qdf_nbuf_dma_map_info() - return the dma map info
3805  * @bmap: dma map
3806  * @sg: dma map info
3807  *
3808  * Return: none
3809  */
3810 void
3811 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
3812 {
3813 	qdf_assert(bmap->mapped);
3814 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
3815 
3816 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
3817 			sizeof(struct __qdf_segment));
3818 	sg->nsegs = bmap->nsegs;
3819 }
3820 qdf_export_symbol(__qdf_nbuf_dma_map_info);
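
/*
 * Usage sketch (illustrative only): pull the segment list out of a mapped
 * buffer and walk it while filling hardware descriptors. program_descriptor()
 * is a hypothetical helper standing in for whatever consumes one segment.
 *
 *	qdf_dmamap_info_t sg;
 *	int i;
 *
 *	__qdf_nbuf_dma_map_info(bmap, &sg);
 *	for (i = 0; i < sg.nsegs; i++)
 *		program_descriptor(i, &sg.dma_segs[i]);
 */
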
3821 /**
3822  * __qdf_nbuf_frag_info() - fill a scatter/gather list with the data
3823  *			pointer and length of each fragment
3824  * @skb: sk buff
3825  * @sg: scatter/gather list of all the frags
3826  *
3827  * Return: none
3828  */
3829 #if defined(__QDF_SUPPORT_FRAG_MEM)
3830 void
3831 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3832 {
3833 	struct skb_shared_info *sh = skb_shinfo(skb);
3834 
3835 	qdf_assert(skb);
3836 	sg->sg_segs[0].vaddr = skb->data;
3837 	sg->sg_segs[0].len   = skb->len;
3838 	sg->nsegs            = 1;
3839 
3840 	for (int i = 1; i <= sh->nr_frags; i++) {
3841 		skb_frag_t    *f        = &sh->frags[i - 1];
3842 
3843 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
3844 			f->page_offset);
3845 		sg->sg_segs[i].len      = f->size;
3846 		qdf_assert(i < QDF_MAX_SGLIST);
3847 	}
3848 	sg->nsegs += sh->nr_frags;
3849 }
3850 qdf_export_symbol(__qdf_nbuf_frag_info);
3851 #else
3852 #ifdef QDF_OS_DEBUG
3853 void
3854 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3855 {
3856 
3857 	struct skb_shared_info  *sh = skb_shinfo(skb);
3858 
3859 	qdf_assert(skb);
3860 	sg->sg_segs[0].vaddr = skb->data;
3861 	sg->sg_segs[0].len   = skb->len;
3862 	sg->nsegs            = 1;
3863 
3864 	qdf_assert(sh->nr_frags == 0);
3865 }
3866 qdf_export_symbol(__qdf_nbuf_frag_info);
3867 #else
3868 void
3869 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
3870 {
3871 	sg->sg_segs[0].vaddr = skb->data;
3872 	sg->sg_segs[0].len   = skb->len;
3873 	sg->nsegs            = 1;
3874 }
3875 qdf_export_symbol(__qdf_nbuf_frag_info);
3876 #endif
3877 #endif
3878 /**
3879  * __qdf_nbuf_get_frag_size() - get frag size
3880  * @nbuf: sk buffer
3881  * @cur_frag: current frag
3882  *
3883  * Return: frag size
3884  */
3885 uint32_t
3886 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
3887 {
3888 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
3889 	const skb_frag_t *frag = sh->frags + cur_frag;
3890 
3891 	return skb_frag_size(frag);
3892 }
3893 qdf_export_symbol(__qdf_nbuf_get_frag_size);
3894 
3895 /**
3896  * __qdf_nbuf_frag_map() - dma map frag
3897  * @osdev: os device
3898  * @nbuf: sk buff
3899  * @offset: offset
3900  * @dir: direction
3901  * @cur_frag: current fragment
3902  *
3903  * Return: QDF status
3904  */
3905 #ifdef A_SIMOS_DEVHOST
3906 QDF_STATUS __qdf_nbuf_frag_map(
3907 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3908 	int offset, qdf_dma_dir_t dir, int cur_frag)
3909 {
3910 	/* the simulation dev-host uses the data pointer as the DMA address */
3911 
3912 	QDF_NBUF_CB_PADDR(nbuf) = (uintptr_t)nbuf->data;
3913 	return QDF_STATUS_SUCCESS;
3914 }
3915 qdf_export_symbol(__qdf_nbuf_frag_map);
3916 #else
3917 QDF_STATUS __qdf_nbuf_frag_map(
3918 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
3919 	int offset, qdf_dma_dir_t dir, int cur_frag)
3920 {
3921 	dma_addr_t paddr, frag_len;
3922 	struct skb_shared_info *sh = skb_shinfo(nbuf);
3923 	const skb_frag_t *frag = sh->frags + cur_frag;
3924 
3925 	frag_len = skb_frag_size(frag);
3926 
3927 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
3928 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
3929 					__qdf_dma_dir_to_os(dir));
3930 	return dma_mapping_error(osdev->dev, paddr) ?
3931 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
3932 }
3933 qdf_export_symbol(__qdf_nbuf_frag_map);
3934 #endif
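
/*
 * Usage sketch (illustrative only): map the extra TX fragment at index 0
 * before transmit and abort if the mapping fails.
 *
 *	if (__qdf_nbuf_frag_map(osdev, nbuf, 0, QDF_DMA_TO_DEVICE, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 */
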
3935 /**
3936  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
3937  * @dmap: dma map
3938  * @cb: callback
3939  * @arg: argument
3940  *
3941  * Return: none
3942  */
3943 void
3944 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
3945 {
3946 	return;
3947 }
3948 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
3949 
3950 
3951 /**
3952  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
3953  * @osdev: os device
3954  * @buf: sk buff
3955  * @dir: direction
3956  *
3957  * Return: none
3958  */
3959 #if defined(A_SIMOS_DEVHOST)
3960 static void __qdf_nbuf_sync_single_for_cpu(
3961 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3962 {
3963 	return;
3964 }
3965 #else
3966 static void __qdf_nbuf_sync_single_for_cpu(
3967 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
3968 {
3969 	if (0 == QDF_NBUF_CB_PADDR(buf)) {
3970 		qdf_err("ERROR: NBUF mapped physical address is NULL");
3971 		return;
3972 	}
3973 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
3974 		skb_end_offset(buf) - skb_headroom(buf),
3975 		__qdf_dma_dir_to_os(dir));
3976 }
3977 #endif
3978 /**
3979  * __qdf_nbuf_sync_for_cpu() - nbuf sync
3980  * @osdev: os device
3981  * @skb: sk buff
3982  * @dir: direction
3983  *
3984  * Return: none
3985  */
3986 void
3987 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
3988 	struct sk_buff *skb, qdf_dma_dir_t dir)
3989 {
3990 	qdf_assert(
3991 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3992 
3993 	/*
3994 	 * Assume there's a single fragment.
3995 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
3996 	 */
3997 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
3998 }
3999 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
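
/*
 * Usage sketch (illustrative only): on RX completion, make the DMA-written
 * data visible to the CPU before parsing the frame. process_rx_frame() is a
 * hypothetical consumer of the synced data.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	process_rx_frame(skb);
 */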
4000 
4001 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4002 /**
4003  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4004  * @rx_status: Pointer to rx_status.
4005  * @rtap_buf: Buf to which VHT info has to be updated.
4006  * @rtap_len: Current length of radiotap buffer
4007  *
4008  * Return: Length of radiotap after VHT flags updated.
4009  */
4010 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4011 					struct mon_rx_status *rx_status,
4012 					int8_t *rtap_buf,
4013 					uint32_t rtap_len)
4014 {
4015 	uint16_t vht_flags = 0;
4016 
4017 	rtap_len = qdf_align(rtap_len, 2);
4018 
4019 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4020 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4021 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4022 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4023 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4024 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4025 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4026 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4027 	rtap_len += 2;
4028 
4029 	rtap_buf[rtap_len] |=
4030 		(rx_status->is_stbc ?
4031 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4032 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4033 		(rx_status->ldpc ?
4034 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4035 		(rx_status->beamformed ?
4036 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4037 	rtap_len += 1;
4038 	switch (rx_status->vht_flag_values2) {
4039 	case IEEE80211_RADIOTAP_VHT_BW_20:
4040 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4041 		break;
4042 	case IEEE80211_RADIOTAP_VHT_BW_40:
4043 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4044 		break;
4045 	case IEEE80211_RADIOTAP_VHT_BW_80:
4046 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4047 		break;
4048 	case IEEE80211_RADIOTAP_VHT_BW_160:
4049 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4050 		break;
4051 	}
4052 	rtap_len += 1;
4053 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4054 	rtap_len += 1;
4055 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4056 	rtap_len += 1;
4057 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4058 	rtap_len += 1;
4059 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4060 	rtap_len += 1;
4061 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4062 	rtap_len += 1;
4063 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4064 	rtap_len += 1;
4065 	put_unaligned_le16(rx_status->vht_flag_values6,
4066 			   &rtap_buf[rtap_len]);
4067 	rtap_len += 2;
4068 
4069 	return rtap_len;
4070 }
4071 
4072 /**
4073  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4074  * @rx_status: Pointer to rx_status.
4075  * @rtap_buf: buffer to which radiotap has to be updated
4076  * @rtap_len: radiotap length
4077  *
4078  * Update the high-efficiency (11ax) fields in the radiotap header.
4079  *
4080  * Return: updated radiotap buffer length.
4081  */
4082 static unsigned int
4083 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4084 				     int8_t *rtap_buf, uint32_t rtap_len)
4085 {
4086 	/*
4087 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4088 	 * Enable all "known" HE radiotap flags for now
4089 	 */
4090 	rtap_len = qdf_align(rtap_len, 2);
4091 
4092 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4093 	rtap_len += 2;
4094 
4095 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4096 	rtap_len += 2;
4097 
4098 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4099 	rtap_len += 2;
4100 
4101 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4102 	rtap_len += 2;
4103 
4104 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4105 	rtap_len += 2;
4106 
4107 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4108 	rtap_len += 2;
4109 	qdf_rl_debug("he data %x %x %x %x %x %x",
4110 		     rx_status->he_data1,
4111 		     rx_status->he_data2, rx_status->he_data3,
4112 		     rx_status->he_data4, rx_status->he_data5,
4113 		     rx_status->he_data6);
4114 	return rtap_len;
4115 }
4116 
4117 
4118 /**
4119  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4120  * @rx_status: Pointer to rx_status.
4121  * @rtap_buf: buffer to which radiotap has to be updated
4122  * @rtap_len: radiotap length
4123  *
4124  * Update the HE-MU fields in the radiotap header.
4125  *
4126  * Return: updated radiotap buffer length.
4127  */
4128 static unsigned int
4129 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4130 				     int8_t *rtap_buf, uint32_t rtap_len)
4131 {
4132 	rtap_len = qdf_align(rtap_len, 2);
4133 
4134 	/*
4135 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4136 	 * Enable all "known" he-mu radiotap flags for now
4137 	 */
4138 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4139 	rtap_len += 2;
4140 
4141 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4142 	rtap_len += 2;
4143 
4144 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4145 	rtap_len += 1;
4146 
4147 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4148 	rtap_len += 1;
4149 
4150 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4151 	rtap_len += 1;
4152 
4153 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4154 	rtap_len += 1;
4155 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4156 		  rx_status->he_flags1,
4157 		  rx_status->he_flags2, rx_status->he_RU[0],
4158 		  rx_status->he_RU[1], rx_status->he_RU[2],
4159 		  rx_status->he_RU[3]);
4160 
4161 	return rtap_len;
4162 }
4163 
4164 /**
4165  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4166  * @rx_status: Pointer to rx_status.
4167  * @rtap_buf: buffer to which radiotap has to be updated
4168  * @rtap_len: radiotap length
4169  *
4170  * Update the HE-MU-other fields in the radiotap header.
4171  *
4172  * Return: updated radiotap buffer length.
4173  */
4174 static unsigned int
4175 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4176 				     int8_t *rtap_buf, uint32_t rtap_len)
4177 {
4178 	rtap_len = qdf_align(rtap_len, 2);
4179 
4180 	/*
4181 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4182 	 * Enable all "known" he-mu-other radiotap flags for now
4183 	 */
4184 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4185 	rtap_len += 2;
4186 
4187 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4188 	rtap_len += 2;
4189 
4190 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4191 	rtap_len += 1;
4192 
4193 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4194 	rtap_len += 1;
4195 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4196 		  rx_status->he_per_user_1,
4197 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4198 		  rx_status->he_per_user_known);
4199 	return rtap_len;
4200 }
4201 
4202 #define IEEE80211_RADIOTAP_TX_STATUS 0
4203 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
4204 
4205 /**
4206  * Length budget for the radiotap header: the combined length
4207  * (mandatory struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
4208  * must not exceed the available headroom_sz.
4209  * Increase this when more radiotap elements are added.
4210  * The number after '+' is the maximum possible increase due to alignment.
4211  */
4212 
4213 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4214 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4215 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4216 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4217 #define RADIOTAP_FIXED_HEADER_LEN 17
4218 #define RADIOTAP_HT_FLAGS_LEN 3
4219 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4220 #define RADIOTAP_VENDOR_NS_LEN \
4221 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4222 /* This is Radio Tap Header Extension Length.
4223  * 4 Bytes for Extended it_present bit map +
4224  * 4 bytes padding for alignment
4225  */
4226 #define RADIOTAP_HEADER_EXT_LEN (2 * sizeof(uint32_t))
4227 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4228 				RADIOTAP_FIXED_HEADER_LEN + \
4229 				RADIOTAP_HT_FLAGS_LEN + \
4230 				RADIOTAP_VHT_FLAGS_LEN + \
4231 				RADIOTAP_AMPDU_STATUS_LEN + \
4232 				RADIOTAP_HE_FLAGS_LEN + \
4233 				RADIOTAP_HE_MU_FLAGS_LEN + \
4234 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4235 				RADIOTAP_VENDOR_NS_LEN + \
4236 				RADIOTAP_HEADER_EXT_LEN)
4237 
4238 #define IEEE80211_RADIOTAP_HE 23
4239 #define IEEE80211_RADIOTAP_HE_MU	24
4240 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4241 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4242 
4243 /**
4244  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4245  * @rx_status: Pointer to rx_status.
4246  * @rtap_buf: Buf to which AMPDU info has to be updated.
4247  * @rtap_len: Current length of radiotap buffer
4248  *
4249  * Return: Length of radiotap after AMPDU flags updated.
4250  */
4251 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4252 					struct mon_rx_status *rx_status,
4253 					uint8_t *rtap_buf,
4254 					uint32_t rtap_len)
4255 {
4256 	/*
4257 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4258 	 * First 32 bits of AMPDU represents the reference number
4259 	 */
4260 
4261 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4262 	uint16_t ampdu_flags = 0;
4263 	uint16_t ampdu_reserved_flags = 0;
4264 
4265 	rtap_len = qdf_align(rtap_len, 4);
4266 
4267 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4268 	rtap_len += 4;
4269 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4270 	rtap_len += 2;
4271 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4272 	rtap_len += 2;
4273 
4274 	return rtap_len;
4275 }
4276 
4277 /**
4278  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4279  * @rx_status: Pointer to rx_status.
4280  * @nbuf:      nbuf pointer to which radiotap has to be updated
4281  * @headroom_sz: Available headroom size.
4282  *
4283  * Return: length of the radiotap header pushed, or 0 on failure.
4284  */
4285 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4286 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4287 {
4288 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4289 	struct ieee80211_radiotap_header *rthdr =
4290 		(struct ieee80211_radiotap_header *)rtap_buf;
4291 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4292 	uint32_t rtap_len = rtap_hdr_len;
4293 	uint8_t length = rtap_len;
4294 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4295 	uint32_t *rtap_ext = NULL;
4296 
4297 	/* Adding Extended Header space */
4298 	if (rx_status->add_rtap_ext) {
4299 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
4300 		rtap_len = rtap_hdr_len;
4301 	}
4302 	length = rtap_len;
4303 
4304 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4305 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4306 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4307 	rtap_len += 8;
4308 
4309 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4310 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4311 
4312 	if (rx_status->rs_fcs_err)
4313 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4314 
4315 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4316 	rtap_len += 1;
4317 
4318 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4319 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4320 	    !rx_status->he_flags) {
4321 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4322 		rtap_buf[rtap_len] = rx_status->rate;
4323 	} else
4324 		rtap_buf[rtap_len] = 0;
4325 	rtap_len += 1;
4326 
4327 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4328 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4329 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4330 	rtap_len += 2;
4331 	/* Channel flags. */
4332 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
4333 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4334 	else
4335 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4336 	if (rx_status->cck_flag)
4337 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4338 	if (rx_status->ofdm_flag)
4339 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4340 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4341 	rtap_len += 2;
4342 
4343 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4344 	 *					(dBm)
4345 	 */
4346 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4347 	/*
4348 	 * rssi_comb is in dB relative to the noise floor; convert it to
4349 	 * dBm by adding the channel noise floor (nominally -96 dBm).
4350 	 */
4351 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4352 	rtap_len += 1;
4353 
4354 	/* RX signal noise floor */
4355 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4356 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4357 	rtap_len += 1;
4358 
4359 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4360 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4361 	rtap_buf[rtap_len] = rx_status->nr_ant;
4362 	rtap_len += 1;
4363 
4364 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4365 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4366 		return 0;
4367 	}
4368 
4369 	if (rx_status->ht_flags) {
4370 		length = rtap_len;
4371 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4372 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4373 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4374 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4375 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4376 		rtap_len += 1;
4377 
4378 		if (rx_status->sgi)
4379 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4380 		if (rx_status->bw)
4381 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4382 		else
4383 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4384 		rtap_len += 1;
4385 
4386 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4387 		rtap_len += 1;
4388 
4389 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4390 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4391 			return 0;
4392 		}
4393 	}
4394 
4395 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4396 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4397 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4398 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4399 								rtap_buf,
4400 								rtap_len);
4401 	}
4402 
4403 	if (rx_status->vht_flags) {
4404 		length = rtap_len;
4405 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4406 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4407 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4408 								rtap_buf,
4409 								rtap_len);
4410 
4411 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4412 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4413 			return 0;
4414 		}
4415 	}
4416 
4417 	if (rx_status->he_flags) {
4418 		length = rtap_len;
4419 		/* IEEE80211_RADIOTAP_HE */
4420 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4421 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4422 								rtap_buf,
4423 								rtap_len);
4424 
4425 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4426 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4427 			return 0;
4428 		}
4429 	}
4430 
4431 	if (rx_status->he_mu_flags) {
4432 		length = rtap_len;
4433 		/* IEEE80211_RADIOTAP_HE-MU */
4434 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4435 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4436 								rtap_buf,
4437 								rtap_len);
4438 
4439 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4440 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4441 			return 0;
4442 		}
4443 	}
4444 
4445 	if (rx_status->he_mu_other_flags) {
4446 		length = rtap_len;
4447 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4448 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4449 		rtap_len =
4450 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4451 								rtap_buf,
4452 								rtap_len);
4453 
4454 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4455 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4456 			return 0;
4457 		}
4458 	}
4459 
4460 	rtap_len = qdf_align(rtap_len, 2);
4461 	/*
4462 	 * Radiotap Vendor Namespace
4463 	 */
4464 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4465 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4466 					(rtap_buf + rtap_len);
4467 	/*
4468 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4469 	 */
4470 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4471 	/*
4472 	 * Name space selector = 0
4473 	 * We only will have one namespace for now
4474 	 */
4475 	radiotap_vendor_ns_ath->hdr.selector = 0;
4476 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4477 					sizeof(*radiotap_vendor_ns_ath) -
4478 					sizeof(radiotap_vendor_ns_ath->hdr));
4479 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4480 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4481 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4482 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4483 				cpu_to_le32(rx_status->ppdu_timestamp);
4484 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4485 
4486 	/* Add Extension to Radiotap Header & corresponding data */
4487 	if (rx_status->add_rtap_ext) {
4488 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXT);
4489 		rtap_ext = (uint32_t *)&rthdr->it_present;
4490 		rtap_ext++;
4491 		*rtap_ext = cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_STATUS);
4492 		*rtap_ext |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RETRY_COUNT);
4493 
4494 		rtap_buf[rtap_len] = rx_status->tx_status;
4495 		rtap_len += 1;
4496 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
4497 		rtap_len += 1;
4498 	}
4499 
4500 	rthdr->it_len = cpu_to_le16(rtap_len);
4501 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4502 
4503 	if (headroom_sz < rtap_len) {
4504 		qdf_err("ERROR: not enough space to update radiotap");
4505 		return 0;
4506 	}
4507 	qdf_nbuf_push_head(nbuf, rtap_len);
4508 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4509 	return rtap_len;
4510 }
4511 #else
4512 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4513 					struct mon_rx_status *rx_status,
4514 					int8_t *rtap_buf,
4515 					uint32_t rtap_len)
4516 {
4517 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4518 	return 0;
4519 }
4520 
4521 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4522 				      int8_t *rtap_buf, uint32_t rtap_len)
4523 {
4524 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4525 	return 0;
4526 }
4527 
4528 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4529 					struct mon_rx_status *rx_status,
4530 					uint8_t *rtap_buf,
4531 					uint32_t rtap_len)
4532 {
4533 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4534 	return 0;
4535 }
4536 
4537 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4538 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4539 {
4540 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4541 	return 0;
4542 }
4543 #endif
4544 qdf_export_symbol(qdf_nbuf_update_radiotap);
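
/*
 * Usage sketch (illustrative only): a monitor-mode RX path prepends the
 * radiotap header before delivering the frame. deliver_to_monitor_interface()
 * is a hypothetical stand-in for the caller's delivery routine.
 *
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf))) {
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 *	deliver_to_monitor_interface(nbuf);
 */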
4545 
4546 /**
4547  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4548  * @cb_func_ptr: function pointer to the nbuf free callback
4549  *
4550  * This function registers a callback function for nbuf free.
4551  *
4552  * Return: none
4553  */
4554 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4555 {
4556 	nbuf_free_cb = cb_func_ptr;
4557 }
4558 
4559 /**
4560  * qdf_nbuf_classify_pkt() - classify packet
4561  * @skb: sk buff
4562  *
4563  * Return: none
4564  */
4565 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4566 {
4567 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4568 
4569 	/* check whether the destination MAC address is broadcast/multicast */
4570 	if (is_broadcast_ether_addr((uint8_t *)eh))
4571 		QDF_NBUF_CB_SET_BCAST(skb);
4572 	else if (is_multicast_ether_addr((uint8_t *)eh))
4573 		QDF_NBUF_CB_SET_MCAST(skb);
4574 
4575 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4576 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4577 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4578 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4579 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4580 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4581 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4582 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4583 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4584 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4585 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4586 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4587 }
4588 qdf_export_symbol(qdf_nbuf_classify_pkt);
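
/*
 * Usage sketch (illustrative only): classify a TX frame and use the recorded
 * packet type to give EAPOL frames special treatment. HIGH_PRIO_QUEUE and
 * queue_id are hypothetical names.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		queue_id = HIGH_PRIO_QUEUE;
 */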
4589 
4590 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4591 {
4592 	qdf_nbuf_users_set(&nbuf->users, 1);
4593 	nbuf->data = nbuf->head + NET_SKB_PAD;
4594 	skb_reset_tail_pointer(nbuf);
4595 }
4596 qdf_export_symbol(__qdf_nbuf_init);
4597 
4598 #ifdef WLAN_FEATURE_FASTPATH
4599 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4600 {
4601 	qdf_nbuf_users_set(&nbuf->users, 1);
4602 	nbuf->data = nbuf->head + NET_SKB_PAD;
4603 	skb_reset_tail_pointer(nbuf);
4604 }
4605 qdf_export_symbol(qdf_nbuf_init_fast);
4606 #endif /* WLAN_FEATURE_FASTPATH */
4607 
4608 
4609 #ifdef QDF_NBUF_GLOBAL_COUNT
4610 /**
4611  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
4612  *
4613  * Return: void
4614  */
4615 void __qdf_nbuf_mod_init(void)
4616 {
4617 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
4618 	qdf_atomic_init(&nbuf_count);
4619 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4620 }
4621 
4622 /**
4623  * __qdf_nbuf_mod_exit() - De-initialization routine for qdf_nbuf
4624  *
4625  * Return: void
4626  */
4627 void __qdf_nbuf_mod_exit(void)
4628 {
4629 }
4630 #endif
4631 
4632 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
4633 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4634 					    int offset)
4635 {
4636 	unsigned int frag_offset;
4637 	skb_frag_t *frag;
4638 
4639 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4640 		return QDF_STATUS_E_FAILURE;
4641 
4642 	frag = &skb_shinfo(nbuf)->frags[idx];
4643 	frag_offset = skb_frag_off(frag);
4644 
4645 	frag_offset += offset;
4646 	skb_frag_off_set(frag, frag_offset);
4647 
4648 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4649 
4650 	return QDF_STATUS_SUCCESS;
4651 }
4652 
4653 #else
4654 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4655 					    int offset)
4656 {
4657 	uint16_t frag_offset;
4658 	skb_frag_t *frag;
4659 
4660 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4661 		return QDF_STATUS_E_FAILURE;
4662 
4663 	frag = &skb_shinfo(nbuf)->frags[idx];
4664 	frag_offset = frag->page_offset;
4665 
4666 	frag_offset += offset;
4667 	frag->page_offset = frag_offset;
4668 
4669 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4670 
4671 	return QDF_STATUS_SUCCESS;
4672 }
4673 #endif
4674 
4675 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
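
/*
 * Usage sketch (illustrative only): advance fragment 0 past a 4-byte
 * hardware tag; the fragment length shrinks by the same amount via
 * __qdf_nbuf_trim_add_frag_size(). The 4-byte tag is a hypothetical example.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, 4) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */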
4676 
4677 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
4678 			    int offset, int frag_len,
4679 			    unsigned int truesize, bool take_frag_ref)
4680 {
4681 	struct page *page;
4682 	int frag_offset;
4683 	uint8_t nr_frag;
4684 
4685 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
4686 
4687 	page = virt_to_head_page(buf);
4688 	frag_offset = buf - page_address(page);
4689 
4690 	skb_add_rx_frag(nbuf, nr_frag, page,
4691 			(frag_offset + offset),
4692 			frag_len, truesize);
4693 
4694 	if (unlikely(take_frag_ref))
4695 		skb_frag_ref(nbuf, nr_frag);
4696 }
4697 
4698 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
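
/*
 * Usage sketch (illustrative only): attach a received buffer as a page
 * fragment of an nbuf without taking an extra page reference. rx_buf,
 * msdu_len and RX_BUFFER_SIZE are hypothetical names for the received
 * buffer virtual address, payload length and buffer true size.
 *
 *	__qdf_nbuf_add_rx_frag(rx_buf, nbuf, 0, msdu_len, RX_BUFFER_SIZE,
 *			       false);
 */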
4699