xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define CHANNEL_FREQ_5150 5150
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
95 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 /* Packet Counter */
110 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
111 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
112 #ifdef QDF_NBUF_GLOBAL_COUNT
113 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
114 static qdf_atomic_t nbuf_count;
115 #endif
116 
117 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
118 static bool is_initial_mem_debug_disabled;
119 #endif
120 
121 /**
122  * __qdf_nbuf_get_ip_offset() - Get IPv4/IPv6 header offset
123  * @data: Pointer to network data buffer
124  *
125  * Get the IP header offset when an 802.1Q or 802.1AD
126  * tag is present in the L2 header.
127  *
128  * Return: IP header offset
129  */
130 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
131 {
132 	uint16_t ether_type;
133 
134 	ether_type = *(uint16_t *)(data +
135 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
136 
137 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
138 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
139 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
140 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
141 
142 	return QDF_NBUF_TRAC_IP_OFFSET;
143 }
144 
145 qdf_export_symbol(__qdf_nbuf_get_ip_offset);
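
/*
 * Example (hypothetical, for illustration): __qdf_nbuf_get_ip_offset() lets
 * a caller locate the IP header regardless of any VLAN tags, assuming data
 * points at the start of the Ethernet header.
 *
 *   uint8_t *data = qdf_nbuf_data(nbuf);
 *   struct iphdr *iph =
 *           (struct iphdr *)(data + __qdf_nbuf_get_ip_offset(data));
 */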
146 
147 /**
148  * __qdf_nbuf_get_ether_type() - Get the ether type
149  * @data: Pointer to network data buffer
150  *
151  * Get the ether type even when an 802.1Q or 802.1AD tag is present
152  * in the L2 header. The value is returned in network byte order, e.g.
153  * for IPv4 data (on-wire ether type 0x0800) a little-endian host sees 0x0008.
154  *
155  * Return: ether type.
156  */
157 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
158 {
159 	uint16_t ether_type;
160 
161 	ether_type = *(uint16_t *)(data +
162 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
163 
164 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
165 		ether_type = *(uint16_t *)(data +
166 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
167 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
168 		ether_type = *(uint16_t *)(data +
169 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
170 
171 	return ether_type;
172 }
173 
174 qdf_export_symbol(__qdf_nbuf_get_ether_type);
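
/*
 * Example (hypothetical, for illustration): since the value returned by
 * __qdf_nbuf_get_ether_type() is still in network byte order, it is compared
 * against swapped constants.
 *
 *   if (__qdf_nbuf_get_ether_type(data) ==
 *       QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
 *           handle_ipv4(data);   /* hypothetical caller-side handler */
 */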
175 
176 /**
177  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
178  *
179  * Return: none
180  */
181 void qdf_nbuf_tx_desc_count_display(void)
182 {
183 	qdf_debug("Current Snapshot of the Driver:");
184 	qdf_debug("Data Packets:");
185 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
186 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
187 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
188 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
189 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
190 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
191 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
192 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
193 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
194 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
195 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
196 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
198 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
204 	qdf_debug("Mgmt Packets:");
205 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
206 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
207 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
208 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
209 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
210 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
211 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
212 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
213 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
214 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
215 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
216 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
219 }
220 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
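
/*
 * Note: each value printed above is the difference between the counters of
 * two adjacent layers, i.e. packets that have entered a layer but have not
 * yet been handed to the next one, so the output reflects packets currently
 * held per layer rather than running totals.
 */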
221 
222 /**
223  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
224  * @packet_type: packet type, either mgmt or data
225  * @current_state: layer at which the packet is currently present
226  *
227  * Return: none
228  */
229 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
230 			uint8_t current_state)
231 {
232 	switch (packet_type) {
233 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
234 		nbuf_tx_mgmt[current_state]++;
235 		break;
236 	case QDF_NBUF_TX_PKT_DATA_TRACK:
237 		nbuf_tx_data[current_state]++;
238 		break;
239 	default:
240 		break;
241 	}
242 }
243 qdf_export_symbol(qdf_nbuf_tx_desc_count_update);
244 
245 /**
246  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
247  *
248  * Return: none
249  */
250 void qdf_nbuf_tx_desc_count_clear(void)
251 {
252 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
253 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
254 }
255 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
256 
257 /**
258  * qdf_nbuf_set_state() - Updates the packet state
259  * @nbuf:            network buffer
260  * @current_state :  layer at which the packet currently is
261  *
262  * This function updates the packet state to the layer at which the packet
263  * currently is
264  *
265  * Return: none
266  */
267 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
268 {
269 	/*
270 	 * Only Mgmt, Data Packets are tracked. WMI messages
271 	 * such as scan commands are not tracked
272 	 */
273 	uint8_t packet_type;
274 
275 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
276 
277 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
278 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
279 		return;
280 	}
281 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
282 	qdf_nbuf_tx_desc_count_update(packet_type,
283 					current_state);
284 }
285 qdf_export_symbol(qdf_nbuf_set_state);
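
/*
 * Example (hypothetical, for illustration): a TX path would tag the buffer
 * as it crosses a layer boundary, e.g.
 *
 *   qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);
 *
 * Buffers whose packet track is neither data nor mgmt are ignored, as the
 * function body above shows.
 */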
286 
287 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
288 /**
289  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
290  *
291  * This function starts the alloc fail replenish timer.
292  *
293  * Return: void
294  */
295 static void __qdf_nbuf_start_replenish_timer(void)
296 {
297 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
298 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
299 	    QDF_TIMER_STATE_RUNNING)
300 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
301 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
302 }
303 
304 /**
305  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
306  *
307  * This function stops the alloc fail replenish timer.
308  *
309  * Return: void
310  */
311 static void __qdf_nbuf_stop_replenish_timer(void)
312 {
313 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
314 		return;
315 
316 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
317 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
318 	    QDF_TIMER_STATE_RUNNING)
319 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
320 }
321 
322 /**
323  * qdf_replenish_expire_handler - Replenish expire handler
324  *
325  * This function triggers when the alloc fail replenish timer expires.
326  *
327  * Return: void
328  */
329 static void qdf_replenish_expire_handler(void *arg)
330 {
331 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
332 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
333 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
334 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
335 
336 		/* Error handling here */
337 	}
338 }
339 
340 /**
341  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
342  *
343  * This function initializes the nbuf alloc fail replenish timer.
344  *
345  * Return: void
346  */
347 void __qdf_nbuf_init_replenish_timer(void)
348 {
349 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
350 			  qdf_replenish_expire_handler, NULL);
351 }
352 
353 /**
354  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
355  *
356  * This function deinitializes the nbuf alloc fail replenish timer.
357  *
358  * Return: void
359  */
360 void __qdf_nbuf_deinit_replenish_timer(void)
361 {
362 	__qdf_nbuf_stop_replenish_timer();
363 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
364 }
365 #else
366 
367 static inline void __qdf_nbuf_start_replenish_timer(void) {}
368 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
369 #endif
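
/*
 * Sketch of the expected replenish timer lifecycle when
 * FEATURE_NBUFF_REPLENISH_TIMER is enabled (assumes the driver attach/detach
 * paths call the init/deinit helpers):
 *
 *   __qdf_nbuf_init_replenish_timer();    // once at attach
 *   __qdf_nbuf_start_replenish_timer();   // on each nbuf alloc failure
 *   __qdf_nbuf_stop_replenish_timer();    // once a pre-allocated nbuf is found
 *   __qdf_nbuf_deinit_replenish_timer();  // once at detach
 */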
370 
371 /* globals do not need to be initialized to NULL/0 */
372 qdf_nbuf_trace_update_t qdf_trace_update_cb;
373 qdf_nbuf_free_t nbuf_free_cb;
374 
375 #ifdef QDF_NBUF_GLOBAL_COUNT
376 
377 /**
378  * __qdf_nbuf_count_get() - get nbuf global count
379  *
380  * Return: nbuf global count
381  */
382 int __qdf_nbuf_count_get(void)
383 {
384 	return qdf_atomic_read(&nbuf_count);
385 }
386 qdf_export_symbol(__qdf_nbuf_count_get);
387 
388 /**
389  * __qdf_nbuf_count_inc() - increment nbuf global count
390  *
391  * @nbuf: sk buff
392  *
393  * Return: void
394  */
395 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
396 {
397 	int num_nbuf = 1;
398 	qdf_nbuf_t ext_list;
399 
400 	if (qdf_likely(is_initial_mem_debug_disabled))
401 		return;
402 
403 	ext_list = qdf_nbuf_get_ext_list(nbuf);
404 
405 	/* Take care to account for frag_list */
406 	while (ext_list) {
407 		++num_nbuf;
408 		ext_list = qdf_nbuf_queue_next(ext_list);
409 	}
410 
411 	qdf_atomic_add(num_nbuf, &nbuf_count);
412 }
413 qdf_export_symbol(__qdf_nbuf_count_inc);
414 
415 /**
416  * __qdf_nbuf_count_dec() - decrement nbuf global count
417  *
418  * @nbuf: sk buff
419  *
420  * Return: void
421  */
422 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
423 {
424 	qdf_nbuf_t ext_list;
425 	int num_nbuf;
426 
427 	if (qdf_likely(is_initial_mem_debug_disabled))
428 		return;
429 
430 	if (qdf_nbuf_get_users(nbuf) > 1)
431 		return;
432 
433 	num_nbuf = 1;
434 
435 	/* Take care to account for frag_list */
436 	ext_list = qdf_nbuf_get_ext_list(nbuf);
437 	while (ext_list) {
438 		if (qdf_nbuf_get_users(ext_list) == 1)
439 			++num_nbuf;
440 		ext_list = qdf_nbuf_queue_next(ext_list);
441 	}
442 
443 	qdf_atomic_sub(num_nbuf, &nbuf_count);
444 }
445 qdf_export_symbol(__qdf_nbuf_count_dec);
446 #endif
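
/*
 * Note: __qdf_nbuf_count_inc()/__qdf_nbuf_count_dec() walk the frag_list
 * chain, so an nbuf with N chained ext buffers contributes N + 1 to the
 * global count; the decrement is skipped while the user count is above one,
 * mirroring the point at which the buffer is actually freed.
 */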
447 
448 #ifdef NBUF_FRAG_MEMORY_DEBUG
449 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
450 {
451 	qdf_nbuf_t ext_list;
452 	uint32_t num_nr_frags;
453 	uint32_t total_num_nr_frags;
454 
455 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
456 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
457 
458 	total_num_nr_frags = num_nr_frags;
459 
460 	/* Take into account the frags attached to frag_list */
461 	ext_list = qdf_nbuf_get_ext_list(nbuf);
462 	while (ext_list) {
463 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
464 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
465 		total_num_nr_frags += num_nr_frags;
466 		ext_list = qdf_nbuf_queue_next(ext_list);
467 	}
468 
469 	qdf_frag_count_inc(total_num_nr_frags);
470 }
471 
472 qdf_export_symbol(qdf_nbuf_frag_count_inc);
473 
474 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
475 {
476 	qdf_nbuf_t ext_list;
477 	uint32_t num_nr_frags;
478 	uint32_t total_num_nr_frags;
479 
480 	if (qdf_nbuf_get_users(nbuf) > 1)
481 		return;
482 
483 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
484 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
485 
486 	total_num_nr_frags = num_nr_frags;
487 
488 	/* Take into account the frags attached to frag_list */
489 	ext_list = qdf_nbuf_get_ext_list(nbuf);
490 	while (ext_list) {
491 		if (qdf_nbuf_get_users(ext_list) == 1) {
492 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
493 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
494 			total_num_nr_frags += num_nr_frags;
495 		}
496 		ext_list = qdf_nbuf_queue_next(ext_list);
497 	}
498 
499 	qdf_frag_count_dec(total_num_nr_frags);
500 }
501 
502 qdf_export_symbol(qdf_nbuf_frag_count_dec);
503 
504 #endif
505 
506 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
507 	!defined(QCA_WIFI_QCN9000)
508 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
509 				 int align, int prio, const char *func,
510 				 uint32_t line)
511 {
512 	struct sk_buff *skb;
513 	unsigned long offset;
514 	uint32_t lowmem_alloc_tries = 0;
515 
516 	if (align)
517 		size += (align - 1);
518 
519 realloc:
520 	skb = dev_alloc_skb(size);
521 
522 	if (skb)
523 		goto skb_alloc;
524 
525 	skb = pld_nbuf_pre_alloc(size);
526 
527 	if (!skb) {
528 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
529 				size, func, line);
530 		return NULL;
531 	}
532 
533 skb_alloc:
534 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
535 	 * Though we are trying to reserve low memory upfront to prevent this,
536 	 * we sometimes see SKBs allocated from low memory.
537 	 */
538 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
539 		lowmem_alloc_tries++;
540 		if (lowmem_alloc_tries > 100) {
541 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
542 				     size, func, line);
543 			return NULL;
544 		} else {
545 			/* Not freeing to make sure it
546 			 * will not get allocated again
547 			 */
548 			goto realloc;
549 		}
550 	}
551 	memset(skb->cb, 0x0, sizeof(skb->cb));
552 
553 	/*
554 	 * The default is for netbuf fragments to be interpreted
555 	 * as wordstreams rather than bytestreams.
556 	 */
557 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
558 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
559 
560 	/*
561 	 * XXX:how about we reserve first then align
562 	 * Align & make sure that the tail & data are adjusted properly
563 	 */
564 
565 	if (align) {
566 		offset = ((unsigned long)skb->data) % align;
567 		if (offset)
568 			skb_reserve(skb, align - offset);
569 	}
570 
571 	/*
572 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
573 	 * pointer
574 	 */
575 	skb_reserve(skb, reserve);
576 	qdf_nbuf_count_inc(skb);
577 
578 	return skb;
579 }
580 #else
581 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
582 				 int align, int prio, const char *func,
583 				 uint32_t line)
584 {
585 	struct sk_buff *skb;
586 	unsigned long offset;
587 	int flags = GFP_KERNEL;
588 
589 	if (align)
590 		size += (align - 1);
591 
592 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
593 		flags = GFP_ATOMIC;
594 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
595 		/*
596 		 * Observed that kcompactd burns a lot of CPU to assemble order-3
597 		 * pages. __netdev_alloc_skb has a 4k page fallback option in case
598 		 * the high-order page allocation fails, so we don't need to push
599 		 * that hard. Let kcompactd rest in peace.
600 		 */
601 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
602 #endif
603 	}
604 
605 	skb = __netdev_alloc_skb(NULL, size, flags);
606 
607 	if (skb)
608 		goto skb_alloc;
609 
610 	skb = pld_nbuf_pre_alloc(size);
611 
612 	if (!skb) {
613 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
614 				size, func, line);
615 		__qdf_nbuf_start_replenish_timer();
616 		return NULL;
617 	} else {
618 		__qdf_nbuf_stop_replenish_timer();
619 	}
620 
621 skb_alloc:
622 	memset(skb->cb, 0x0, sizeof(skb->cb));
623 
624 	/*
625 	 * The default is for netbuf fragments to be interpreted
626 	 * as wordstreams rather than bytestreams.
627 	 */
628 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
629 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
630 
631 	/*
632 	 * XXX:how about we reserve first then align
633 	 * Align & make sure that the tail & data are adjusted properly
634 	 */
635 
636 	if (align) {
637 		offset = ((unsigned long)skb->data) % align;
638 		if (offset)
639 			skb_reserve(skb, align - offset);
640 	}
641 
642 	/*
643 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
644 	 * pointer
645 	 */
646 	skb_reserve(skb, reserve);
647 	qdf_nbuf_count_inc(skb);
648 
649 	return skb;
650 }
651 #endif
652 qdf_export_symbol(__qdf_nbuf_alloc);
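
/*
 * Example (hypothetical, for illustration): request a 2 KB buffer with
 * 4-byte alignment and headroom reserved for a private header.
 *
 *   skb = __qdf_nbuf_alloc(osdev, 2048, MY_PRIV_HDR_LEN, 4, 0,
 *                          __func__, __LINE__);
 *   if (!skb)
 *           return QDF_STATUS_E_NOMEM;
 *
 * MY_PRIV_HDR_LEN is a placeholder for whatever headroom the caller needs.
 */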
653 
654 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
655 					  const char *func, uint32_t line)
656 {
657 	qdf_nbuf_t nbuf;
658 	unsigned long offset;
659 
660 	if (align)
661 		size += (align - 1);
662 
663 	nbuf = alloc_skb(size, GFP_ATOMIC);
664 	if (!nbuf)
665 		goto ret_nbuf;
666 
667 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
668 
669 	skb_reserve(nbuf, reserve);
670 
671 	if (align) {
672 		offset = ((unsigned long)nbuf->data) % align;
673 		if (offset)
674 			skb_reserve(nbuf, align - offset);
675 	}
676 
677 	qdf_nbuf_count_inc(nbuf);
678 
679 ret_nbuf:
680 	return nbuf;
681 }
682 
683 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
684 
685 /**
686  * __qdf_nbuf_free() - free the nbuf; this variant is interrupt safe
687  * @skb: Pointer to network buffer
688  *
689  * Return: none
690  */
691 
692 void __qdf_nbuf_free(struct sk_buff *skb)
693 {
694 	if (pld_nbuf_pre_alloc_free(skb))
695 		return;
696 
697 	qdf_nbuf_frag_count_dec(skb);
698 
699 	qdf_nbuf_count_dec(skb);
700 	if (nbuf_free_cb)
701 		nbuf_free_cb(skb);
702 	else
703 		dev_kfree_skb_any(skb);
704 }
705 
706 qdf_export_symbol(__qdf_nbuf_free);
707 
708 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
709 {
710 	qdf_nbuf_t skb_new = NULL;
711 
712 	skb_new = skb_clone(skb, GFP_ATOMIC);
713 	if (skb_new) {
714 		qdf_nbuf_frag_count_inc(skb_new);
715 		qdf_nbuf_count_inc(skb_new);
716 	}
717 	return skb_new;
718 }
719 
720 qdf_export_symbol(__qdf_nbuf_clone);
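
/*
 * Note: skb_clone() shares the underlying data buffer with the original skb,
 * so the clone is cheap but its payload must not be modified; the frag and
 * global counters are bumped for the clone above so that leak accounting
 * stays balanced when the clone is freed.
 */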
721 
722 #ifdef NBUF_MEMORY_DEBUG
723 enum qdf_nbuf_event_type {
724 	QDF_NBUF_ALLOC,
725 	QDF_NBUF_ALLOC_CLONE,
726 	QDF_NBUF_ALLOC_COPY,
727 	QDF_NBUF_ALLOC_FAILURE,
728 	QDF_NBUF_FREE,
729 	QDF_NBUF_MAP,
730 	QDF_NBUF_UNMAP,
731 	QDF_NBUF_ALLOC_COPY_EXPAND,
732 };
733 
734 struct qdf_nbuf_event {
735 	qdf_nbuf_t nbuf;
736 	char func[QDF_MEM_FUNC_NAME_SIZE];
737 	uint32_t line;
738 	enum qdf_nbuf_event_type type;
739 	uint64_t timestamp;
740 };
741 
742 #define QDF_NBUF_HISTORY_SIZE 4096
743 static qdf_atomic_t qdf_nbuf_history_index;
744 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
745 
746 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
747 {
748 	int32_t next = qdf_atomic_inc_return(index);
749 
750 	if (next == size)
751 		qdf_atomic_sub(size, index);
752 
753 	return next % size;
754 }
755 
756 static void
757 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
758 		     enum qdf_nbuf_event_type type)
759 {
760 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
761 						   QDF_NBUF_HISTORY_SIZE);
762 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
763 
764 	event->nbuf = nbuf;
765 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
766 	event->line = line;
767 	event->type = type;
768 	event->timestamp = qdf_get_log_timestamp();
769 }
770 #endif /* NBUF_MEMORY_DEBUG */
771 
772 #ifdef NBUF_MAP_UNMAP_DEBUG
773 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
774 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
775 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
776 
777 static void qdf_nbuf_map_tracking_init(void)
778 {
779 	qdf_tracker_init(&qdf_nbuf_map_tracker);
780 }
781 
782 static void qdf_nbuf_map_tracking_deinit(void)
783 {
784 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
785 }
786 
787 static QDF_STATUS
788 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
789 {
790 	QDF_STATUS status;
791 
792 	if (is_initial_mem_debug_disabled)
793 		return QDF_STATUS_SUCCESS;
794 
795 	status = qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
796 	if (QDF_IS_STATUS_ERROR(status))
797 		return status;
798 
799 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
800 
801 	return QDF_STATUS_SUCCESS;
802 }
803 
804 static void
805 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
806 {
807 	if (is_initial_mem_debug_disabled)
808 		return;
809 
810 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
811 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
812 }
813 
814 void qdf_nbuf_map_check_for_leaks(void)
815 {
816 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
817 }
818 
819 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
820 			      qdf_nbuf_t buf,
821 			      qdf_dma_dir_t dir,
822 			      const char *func,
823 			      uint32_t line)
824 {
825 	QDF_STATUS status;
826 
827 	status = qdf_nbuf_track_map(buf, func, line);
828 	if (QDF_IS_STATUS_ERROR(status))
829 		return status;
830 
831 	status = __qdf_nbuf_map(osdev, buf, dir);
832 	if (QDF_IS_STATUS_ERROR(status))
833 		qdf_nbuf_untrack_map(buf, func, line);
834 	else
835 		qdf_net_buf_debug_update_map_node(buf, func, line);
836 
837 	return status;
838 }
839 
840 qdf_export_symbol(qdf_nbuf_map_debug);
841 
842 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
843 			  qdf_nbuf_t buf,
844 			  qdf_dma_dir_t dir,
845 			  const char *func,
846 			  uint32_t line)
847 {
848 	qdf_nbuf_untrack_map(buf, func, line);
849 	__qdf_nbuf_unmap_single(osdev, buf, dir);
850 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
851 }
852 
853 qdf_export_symbol(qdf_nbuf_unmap_debug);
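
/*
 * Example (hypothetical, for illustration): every successful
 * qdf_nbuf_map_debug() must be balanced by a matching qdf_nbuf_unmap_debug()
 * with the same direction before the buffer is freed, otherwise the tracker
 * reports a map-no-unmap leak.
 *
 *   if (QDF_IS_STATUS_SUCCESS(qdf_nbuf_map_debug(osdev, nbuf,
 *                                                QDF_DMA_TO_DEVICE,
 *                                                __func__, __LINE__))) {
 *           // ... hand the DMA address to hardware ...
 *           qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *                                __func__, __LINE__);
 *   }
 */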
854 
855 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
856 				     qdf_nbuf_t buf,
857 				     qdf_dma_dir_t dir,
858 				     const char *func,
859 				     uint32_t line)
860 {
861 	QDF_STATUS status;
862 
863 	status = qdf_nbuf_track_map(buf, func, line);
864 	if (QDF_IS_STATUS_ERROR(status))
865 		return status;
866 
867 	status = __qdf_nbuf_map_single(osdev, buf, dir);
868 	if (QDF_IS_STATUS_ERROR(status))
869 		qdf_nbuf_untrack_map(buf, func, line);
870 	else
871 		qdf_net_buf_debug_update_map_node(buf, func, line);
872 
873 	return status;
874 }
875 
876 qdf_export_symbol(qdf_nbuf_map_single_debug);
877 
878 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
879 				 qdf_nbuf_t buf,
880 				 qdf_dma_dir_t dir,
881 				 const char *func,
882 				 uint32_t line)
883 {
884 	qdf_nbuf_untrack_map(buf, func, line);
885 	__qdf_nbuf_unmap_single(osdev, buf, dir);
886 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
887 }
888 
889 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
890 
891 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
892 				     qdf_nbuf_t buf,
893 				     qdf_dma_dir_t dir,
894 				     int nbytes,
895 				     const char *func,
896 				     uint32_t line)
897 {
898 	QDF_STATUS status;
899 
900 	status = qdf_nbuf_track_map(buf, func, line);
901 	if (QDF_IS_STATUS_ERROR(status))
902 		return status;
903 
904 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
905 	if (QDF_IS_STATUS_ERROR(status))
906 		qdf_nbuf_untrack_map(buf, func, line);
907 	else
908 		qdf_net_buf_debug_update_map_node(buf, func, line);
909 
910 	return status;
911 }
912 
913 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
914 
915 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
916 				 qdf_nbuf_t buf,
917 				 qdf_dma_dir_t dir,
918 				 int nbytes,
919 				 const char *func,
920 				 uint32_t line)
921 {
922 	qdf_nbuf_untrack_map(buf, func, line);
923 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
924 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
925 }
926 
927 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
928 
929 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
930 					    qdf_nbuf_t buf,
931 					    qdf_dma_dir_t dir,
932 					    int nbytes,
933 					    const char *func,
934 					    uint32_t line)
935 {
936 	QDF_STATUS status;
937 
938 	status = qdf_nbuf_track_map(buf, func, line);
939 	if (QDF_IS_STATUS_ERROR(status))
940 		return status;
941 
942 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
943 	if (QDF_IS_STATUS_ERROR(status))
944 		qdf_nbuf_untrack_map(buf, func, line);
945 	else
946 		qdf_net_buf_debug_update_map_node(buf, func, line);
947 
948 	return status;
949 }
950 
951 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
952 
953 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
954 					qdf_nbuf_t buf,
955 					qdf_dma_dir_t dir,
956 					int nbytes,
957 					const char *func,
958 					uint32_t line)
959 {
960 	qdf_nbuf_untrack_map(buf, func, line);
961 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
962 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
963 }
964 
965 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
966 
967 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
968 					     const char *func,
969 					     uint32_t line)
970 {
971 	char map_func[QDF_TRACKER_FUNC_SIZE];
972 	uint32_t map_line;
973 
974 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
975 				&map_func, &map_line))
976 		return;
977 
978 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
979 			   func, line, map_func, map_line);
980 }
981 #else
982 static inline void qdf_nbuf_map_tracking_init(void)
983 {
984 }
985 
986 static inline void qdf_nbuf_map_tracking_deinit(void)
987 {
988 }
989 
990 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
991 						    const char *func,
992 						    uint32_t line)
993 {
994 }
995 #endif /* NBUF_MAP_UNMAP_DEBUG */
996 
997 /**
998  * __qdf_nbuf_map() - map a buffer to local bus address space
999  * @osdev: OS device
1001  * @skb: Pointer to network buffer
1002  * @dir: Direction
1003  *
1004  * Return: QDF_STATUS
1005  */
1006 #ifdef QDF_OS_DEBUG
1007 QDF_STATUS
1008 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1009 {
1010 	struct skb_shared_info *sh = skb_shinfo(skb);
1011 
1012 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1013 			|| (dir == QDF_DMA_FROM_DEVICE));
1014 
1015 	/*
1016 	 * Assume there's only a single fragment.
1017 	 * To support multiple fragments, it would be necessary to change
1018 	 * qdf_nbuf_t to be a separate object that stores meta-info
1019 	 * (including the bus address for each fragment) and a pointer
1020 	 * to the underlying sk_buff.
1021 	 */
1022 	qdf_assert(sh->nr_frags == 0);
1023 
1024 	return __qdf_nbuf_map_single(osdev, skb, dir);
1025 }
1026 qdf_export_symbol(__qdf_nbuf_map);
1027 
1028 #else
1029 QDF_STATUS
1030 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1031 {
1032 	return __qdf_nbuf_map_single(osdev, skb, dir);
1033 }
1034 qdf_export_symbol(__qdf_nbuf_map);
1035 #endif
1036 /**
1037  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1038  * @osdev: OS device
1039  * @skb: Pointer to network buffer
1040  * @dir: dma direction
1041  *
1042  * Return: none
1043  */
1044 void
1045 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1046 			qdf_dma_dir_t dir)
1047 {
1048 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1049 		   || (dir == QDF_DMA_FROM_DEVICE));
1050 
1051 	/*
1052 	 * Assume there's a single fragment.
1053 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1054 	 */
1055 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1056 }
1057 qdf_export_symbol(__qdf_nbuf_unmap);
1058 
1059 /**
1060  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1061  * @osdev: OS device
1062  * @skb: Pointer to network buffer
1063  * @dir: Direction
1064  *
1065  * Return: QDF_STATUS
1066  */
1067 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1068 QDF_STATUS
1069 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1070 {
1071 	qdf_dma_addr_t paddr;
1072 
1073 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1074 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1075 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1076 	return QDF_STATUS_SUCCESS;
1077 }
1078 qdf_export_symbol(__qdf_nbuf_map_single);
1079 #else
1080 QDF_STATUS
1081 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1082 {
1083 	qdf_dma_addr_t paddr;
1084 
1085 	/* assume that the OS only provides a single fragment */
1086 	QDF_NBUF_CB_PADDR(buf) = paddr =
1087 		dma_map_single(osdev->dev, buf->data,
1088 				skb_end_pointer(buf) - buf->data,
1089 				__qdf_dma_dir_to_os(dir));
1090 	__qdf_record_nbuf_nbytes(
1091 		__qdf_nbuf_get_data_len(buf), dir, true);
1092 	return dma_mapping_error(osdev->dev, paddr)
1093 		? QDF_STATUS_E_FAILURE
1094 		: QDF_STATUS_SUCCESS;
1095 }
1096 qdf_export_symbol(__qdf_nbuf_map_single);
1097 #endif
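
/*
 * Example (hypothetical, for illustration): on success the DMA address is
 * cached in the cb area and can be read back with QDF_NBUF_CB_PADDR().
 *
 *   if (QDF_IS_STATUS_SUCCESS(__qdf_nbuf_map_single(osdev, buf,
 *                                                   QDF_DMA_TO_DEVICE)))
 *           dma_addr = QDF_NBUF_CB_PADDR(buf);   /* qdf_dma_addr_t */
 */
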
1098 /**
1099  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1100  * @osdev: OS device
1101  * @skb: Pointer to network buffer
1102  * @dir: Direction
1103  *
1104  * Return: none
1105  */
1106 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1107 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1108 				qdf_dma_dir_t dir)
1109 {
1110 }
1111 #else
1112 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1113 					qdf_dma_dir_t dir)
1114 {
1115 	if (QDF_NBUF_CB_PADDR(buf)) {
1116 		__qdf_record_nbuf_nbytes(
1117 			__qdf_nbuf_get_data_len(buf), dir, false);
1118 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1119 			skb_end_pointer(buf) - buf->data,
1120 			__qdf_dma_dir_to_os(dir));
1121 	}
1122 }
1123 #endif
1124 qdf_export_symbol(__qdf_nbuf_unmap_single);
1125 
1126 /**
1127  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1128  * @skb: Pointer to network buffer
1129  * @cksum: Pointer to checksum value
1130  *
1131  * Return: QDF_STATUS
1132  */
1133 QDF_STATUS
1134 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1135 {
1136 	switch (cksum->l4_result) {
1137 	case QDF_NBUF_RX_CKSUM_NONE:
1138 		skb->ip_summed = CHECKSUM_NONE;
1139 		break;
1140 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1141 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1142 		break;
1143 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1144 		skb->ip_summed = CHECKSUM_PARTIAL;
1145 		skb->csum = cksum->val;
1146 		break;
1147 	default:
1148 		pr_err("Unknown checksum type\n");
1149 		qdf_assert(0);
1150 		return QDF_STATUS_E_NOSUPPORT;
1151 	}
1152 	return QDF_STATUS_SUCCESS;
1153 }
1154 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
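
/*
 * Example (hypothetical, for illustration): an RX path that received a
 * hardware-verified TCP/UDP checksum would report it as follows.
 *
 *   qdf_nbuf_rx_cksum_t cksum = {
 *           .l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
 *   };
 *   __qdf_nbuf_set_rx_cksum(skb, &cksum);
 */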
1155 
1156 /**
1157  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1158  * @skb: Pointer to network buffer
1159  *
1160  * Return: TX checksum value
1161  */
1162 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1163 {
1164 	switch (skb->ip_summed) {
1165 	case CHECKSUM_NONE:
1166 		return QDF_NBUF_TX_CKSUM_NONE;
1167 	case CHECKSUM_PARTIAL:
1168 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1169 	case CHECKSUM_COMPLETE:
1170 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1171 	default:
1172 		return QDF_NBUF_TX_CKSUM_NONE;
1173 	}
1174 }
1175 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1176 
1177 /**
1178  * __qdf_nbuf_get_tid() - get tid
1179  * @skb: Pointer to network buffer
1180  *
1181  * Return: tid
1182  */
1183 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1184 {
1185 	return skb->priority;
1186 }
1187 qdf_export_symbol(__qdf_nbuf_get_tid);
1188 
1189 /**
1190  * __qdf_nbuf_set_tid() - set tid
1191  * @skb: Pointer to network buffer
1192  *
1193  * Return: none
1194  */
1195 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1196 {
1197 	skb->priority = tid;
1198 }
1199 qdf_export_symbol(__qdf_nbuf_set_tid);
1200 
1201 /**
1202  * __qdf_nbuf_get_exemption_type() - get exemption type
1203  * @skb: Pointer to network buffer
1204  *
1205  * Return: exemption type (always QDF_NBUF_EXEMPT_NO_EXEMPTION)
1206  */
1207 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1208 {
1209 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1210 }
1211 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1212 
1213 /**
1214  * __qdf_nbuf_reg_trace_cb() - register trace callback
1215  * @cb_func_ptr: Pointer to trace callback function
1216  *
1217  * Return: none
1218  */
1219 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1220 {
1221 	qdf_trace_update_cb = cb_func_ptr;
1222 }
1223 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1224 
1225 /**
1226  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1227  *              of DHCP packet.
1228  * @data: Pointer to DHCP packet data buffer
1229  *
1230  * This func. returns the subtype of DHCP packet.
1231  *
1232  * Return: subtype of the DHCP packet.
1233  */
1234 enum qdf_proto_subtype
1235 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1236 {
1237 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1238 
1239 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1240 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1241 					QDF_DHCP_OPTION53_LENGTH)) {
1242 
1243 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1244 		case QDF_DHCP_DISCOVER:
1245 			subtype = QDF_PROTO_DHCP_DISCOVER;
1246 			break;
1247 		case QDF_DHCP_REQUEST:
1248 			subtype = QDF_PROTO_DHCP_REQUEST;
1249 			break;
1250 		case QDF_DHCP_OFFER:
1251 			subtype = QDF_PROTO_DHCP_OFFER;
1252 			break;
1253 		case QDF_DHCP_ACK:
1254 			subtype = QDF_PROTO_DHCP_ACK;
1255 			break;
1256 		case QDF_DHCP_NAK:
1257 			subtype = QDF_PROTO_DHCP_NACK;
1258 			break;
1259 		case QDF_DHCP_RELEASE:
1260 			subtype = QDF_PROTO_DHCP_RELEASE;
1261 			break;
1262 		case QDF_DHCP_INFORM:
1263 			subtype = QDF_PROTO_DHCP_INFORM;
1264 			break;
1265 		case QDF_DHCP_DECLINE:
1266 			subtype = QDF_PROTO_DHCP_DECLINE;
1267 			break;
1268 		default:
1269 			break;
1270 		}
1271 	}
1272 
1273 	return subtype;
1274 }
1275 
1276 /**
1277  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1278  *            of EAPOL packet.
1279  * @data: Pointer to EAPOL packet data buffer
1280  *
1281  * This func. returns the subtype of EAPOL packet.
1282  *
1283  * Return: subtype of the EAPOL packet.
1284  */
1285 enum qdf_proto_subtype
1286 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1287 {
1288 	uint16_t eapol_key_info;
1289 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1290 	uint16_t mask;
1291 
1292 	eapol_key_info = (uint16_t)(*(uint16_t *)
1293 			(data + EAPOL_KEY_INFO_OFFSET));
1294 
1295 	mask = eapol_key_info & EAPOL_MASK;
1296 	switch (mask) {
1297 	case EAPOL_M1_BIT_MASK:
1298 		subtype = QDF_PROTO_EAPOL_M1;
1299 		break;
1300 	case EAPOL_M2_BIT_MASK:
1301 		subtype = QDF_PROTO_EAPOL_M2;
1302 		break;
1303 	case EAPOL_M3_BIT_MASK:
1304 		subtype = QDF_PROTO_EAPOL_M3;
1305 		break;
1306 	case EAPOL_M4_BIT_MASK:
1307 		subtype = QDF_PROTO_EAPOL_M4;
1308 		break;
1309 	default:
1310 		break;
1311 	}
1312 
1313 	return subtype;
1314 }
1315 
1316 /**
1317  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1318  *            of ARP packet.
1319  * @data: Pointer to ARP packet data buffer
1320  *
1321  * This func. returns the subtype of ARP packet.
1322  *
1323  * Return: subtype of the ARP packet.
1324  */
1325 enum qdf_proto_subtype
1326 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1327 {
1328 	uint16_t subtype;
1329 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1330 
1331 	subtype = (uint16_t)(*(uint16_t *)
1332 			(data + ARP_SUB_TYPE_OFFSET));
1333 
1334 	switch (QDF_SWAP_U16(subtype)) {
1335 	case ARP_REQUEST:
1336 		proto_subtype = QDF_PROTO_ARP_REQ;
1337 		break;
1338 	case ARP_RESPONSE:
1339 		proto_subtype = QDF_PROTO_ARP_RES;
1340 		break;
1341 	default:
1342 		break;
1343 	}
1344 
1345 	return proto_subtype;
1346 }
1347 
1348 /**
1349  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1350  *            of IPV4 ICMP packet.
1351  * @data: Pointer to IPV4 ICMP packet data buffer
1352  *
1353  * This func. returns the subtype of ICMP packet.
1354  *
1355  * Return: subtype of the ICMP packet.
1356  */
1357 enum qdf_proto_subtype
1358 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1359 {
1360 	uint8_t subtype;
1361 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1362 
1363 	subtype = (uint8_t)(*(uint8_t *)
1364 			(data + ICMP_SUBTYPE_OFFSET));
1365 
1366 	switch (subtype) {
1367 	case ICMP_REQUEST:
1368 		proto_subtype = QDF_PROTO_ICMP_REQ;
1369 		break;
1370 	case ICMP_RESPONSE:
1371 		proto_subtype = QDF_PROTO_ICMP_RES;
1372 		break;
1373 	default:
1374 		break;
1375 	}
1376 
1377 	return proto_subtype;
1378 }
1379 
1380 /**
1381  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1382  *            of IPV6 ICMPV6 packet.
1383  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1384  *
1385  * This func. returns the subtype of ICMPV6 packet.
1386  *
1387  * Return: subtype of the ICMPV6 packet.
1388  */
1389 enum qdf_proto_subtype
1390 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1391 {
1392 	uint8_t subtype;
1393 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1394 
1395 	subtype = (uint8_t)(*(uint8_t *)
1396 			(data + ICMPV6_SUBTYPE_OFFSET));
1397 
1398 	switch (subtype) {
1399 	case ICMPV6_REQUEST:
1400 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1401 		break;
1402 	case ICMPV6_RESPONSE:
1403 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1404 		break;
1405 	case ICMPV6_RS:
1406 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1407 		break;
1408 	case ICMPV6_RA:
1409 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1410 		break;
1411 	case ICMPV6_NS:
1412 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1413 		break;
1414 	case ICMPV6_NA:
1415 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1416 		break;
1417 	default:
1418 		break;
1419 	}
1420 
1421 	return proto_subtype;
1422 }
1423 
1424 /**
1425  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1426  *            of IPV4 packet.
1427  * @data: Pointer to IPV4 packet data buffer
1428  *
1429  * This func. returns the proto type of IPV4 packet.
1430  *
1431  * Return: proto type of IPV4 packet.
1432  */
1433 uint8_t
1434 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1435 {
1436 	uint8_t proto_type;
1437 
1438 	proto_type = (uint8_t)(*(uint8_t *)(data +
1439 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1440 	return proto_type;
1441 }
1442 
1443 /**
1444  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1445  *            of IPV6 packet.
1446  * @data: Pointer to IPV6 packet data buffer
1447  *
1448  * This func. returns the proto type of IPV6 packet.
1449  *
1450  * Return: proto type of IPV6 packet.
1451  */
1452 uint8_t
1453 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1454 {
1455 	uint8_t proto_type;
1456 
1457 	proto_type = (uint8_t)(*(uint8_t *)(data +
1458 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1459 	return proto_type;
1460 }
1461 
1462 /**
1463  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
1464  * @data: Pointer to network data
1465  *
1466  * This api is for Tx packets.
1467  *
1468  * Return: true if packet is ipv4 packet
1469  *	   false otherwise
1470  */
1471 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1472 {
1473 	uint16_t ether_type;
1474 
1475 	ether_type = (uint16_t)(*(uint16_t *)(data +
1476 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1477 
1478 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1479 		return true;
1480 	else
1481 		return false;
1482 }
1483 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1484 
1485 /**
1486  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1487  * @data: Pointer to network data buffer
1488  *
1489  * This api is for ipv4 packet.
1490  *
1491  * Return: true if packet is DHCP packet
1492  *	   false otherwise
1493  */
1494 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1495 {
1496 	uint16_t sport;
1497 	uint16_t dport;
1498 	uint8_t ipv4_offset;
1499 	uint8_t ipv4_hdr_len;
1500 	struct iphdr *iphdr;
1501 
1502 	if (__qdf_nbuf_get_ether_type(data) !=
1503 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1504 		return false;
1505 
1506 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1507 	iphdr = (struct iphdr *)(data + ipv4_offset);
1508 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1509 
1510 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1511 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1512 			      sizeof(uint16_t));
1513 
1514 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1515 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1516 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1517 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1518 		return true;
1519 	else
1520 		return false;
1521 }
1522 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
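
/*
 * Example (hypothetical, for illustration): the helpers above are typically
 * combined to classify a frame, e.g.
 *
 *   uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *   if (__qdf_nbuf_data_is_ipv4_pkt(data) &&
 *       __qdf_nbuf_data_is_ipv4_dhcp_pkt(data))
 *           subtype = __qdf_nbuf_data_get_dhcp_subtype(data);
 */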
1523 
1524 /**
1525  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
1526  * @data: Pointer to network data buffer
1527  *
1528  * This api is for ipv4 packet.
1529  *
1530  * Return: true if packet is EAPOL packet
1531  *	   false otherwise.
1532  */
1533 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1534 {
1535 	uint16_t ether_type;
1536 
1537 	ether_type = __qdf_nbuf_get_ether_type(data);
1538 
1539 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1540 		return true;
1541 	else
1542 		return false;
1543 }
1544 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1545 
1546 /**
1547  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1548  * @skb: Pointer to network buffer
1549  *
1550  * This api is for ipv4 packet.
1551  *
1552  * Return: true if packet is WAPI packet
1553  *	   false otherwise.
1554  */
1555 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1556 {
1557 	uint16_t ether_type;
1558 
1559 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1560 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1561 
1562 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1563 		return true;
1564 	else
1565 		return false;
1566 }
1567 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1568 
1569 /**
1570  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1571  * @skb: Pointer to network buffer
1572  *
1573  * This api is for ipv4 packet.
1574  *
1575  * Return: true if packet is tdls packet
1576  *	   false otherwise.
1577  */
1578 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1579 {
1580 	uint16_t ether_type;
1581 
1582 	ether_type = *(uint16_t *)(skb->data +
1583 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1584 
1585 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1586 		return true;
1587 	else
1588 		return false;
1589 }
1590 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1591 
1592 /**
1593  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
1594  * @data: Pointer to network data buffer
1595  *
1596  * This api is for ipv4 packet.
1597  *
1598  * Return: true if packet is ARP packet
1599  *	   false otherwise.
1600  */
1601 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1602 {
1603 	uint16_t ether_type;
1604 
1605 	ether_type = __qdf_nbuf_get_ether_type(data);
1606 
1607 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1608 		return true;
1609 	else
1610 		return false;
1611 }
1612 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1613 
1614 /**
1615  * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
1616  * @data: Pointer to network data buffer
1617  *
1618  * This api is for ipv4 packet.
1619  *
1620  * Return: true if packet is ARP request
1621  *	   false otherwise.
1622  */
1623 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1624 {
1625 	uint16_t op_code;
1626 
1627 	op_code = (uint16_t)(*(uint16_t *)(data +
1628 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1629 
1630 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1631 		return true;
1632 	return false;
1633 }
1634 
1635 /**
1636  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
1637  * @data: Pointer to network data buffer
1638  *
1639  * This api is for ipv4 packet.
1640  *
1641  * Return: true if packet is ARP response
1642  *	   false otherwise.
1643  */
1644 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1645 {
1646 	uint16_t op_code;
1647 
1648 	op_code = (uint16_t)(*(uint16_t *)(data +
1649 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1650 
1651 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1652 		return true;
1653 	return false;
1654 }
1655 
1656 /**
1657  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
1658  * @data: Pointer to network data buffer
1659  *
1660  * This api is for ipv4 packet.
1661  *
1662  * Return: ARP packet source IP value.
1663  */
1664 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1665 {
1666 	uint32_t src_ip;
1667 
1668 	src_ip = (uint32_t)(*(uint32_t *)(data +
1669 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1670 
1671 	return src_ip;
1672 }
1673 
1674 /**
1675  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
1676  * @data: Pointer to network data buffer
1677  *
1678  * This api is for ipv4 packet.
1679  *
1680  * Return: ARP packet target IP value.
1681  */
1682 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1683 {
1684 	uint32_t tgt_ip;
1685 
1686 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1687 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1688 
1689 	return tgt_ip;
1690 }
1691 
1692 /**
1693  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1694  * @data: Pointer to network data buffer
1695  * @len: length to copy
1696  *
1697  * This api is for dns domain name
1698  *
1699  * Return: dns domain name.
1700  */
1701 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1702 {
1703 	uint8_t *domain_name;
1704 
1705 	domain_name = (uint8_t *)
1706 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1707 	return domain_name;
1708 }
1709 
1710 
1711 /**
1712  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1713  * @data: Pointer to network data buffer
1714  *
1715  * This api is for dns query packet.
1716  *
1717  * Return: true if packet is dns query packet.
1718  *	   false otherwise.
1719  */
1720 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1721 {
1722 	uint16_t op_code;
1723 	uint16_t tgt_port;
1724 
1725 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1726 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1727 	/* A standard DNS query always goes to destination port 53. */
1728 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1729 		op_code = (uint16_t)(*(uint16_t *)(data +
1730 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1731 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1732 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1733 			return true;
1734 	}
1735 	return false;
1736 }
1737 
1738 /**
1739  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1740  * @data: Pointer to network data buffer
1741  *
1742  * This api is for dns query response.
1743  *
1744  * Return: true if packet is dns response packet.
1745  *	   false otherwise.
1746  */
1747 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1748 {
1749 	uint16_t op_code;
1750 	uint16_t src_port;
1751 
1752 	src_port = (uint16_t)(*(uint16_t *)(data +
1753 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1754 	/* Standard DNS response always comes on Src Port 53. */
1755 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1756 		op_code = (uint16_t)(*(uint16_t *)(data +
1757 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1758 
1759 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1760 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1761 			return true;
1762 	}
1763 	return false;
1764 }
1765 
1766 /**
1767  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1768  * @data: Pointer to network data buffer
1769  *
1770  * This api is for tcp syn packet.
1771  *
1772  * Return: true if packet is tcp syn packet.
1773  *	   false otherwise.
1774  */
1775 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1776 {
1777 	uint8_t op_code;
1778 
1779 	op_code = (uint8_t)(*(uint8_t *)(data +
1780 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1781 
1782 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1783 		return true;
1784 	return false;
1785 }
1786 
1787 /**
1788  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1789  * @data: Pointer to network data buffer
1790  *
1791  * This api is for tcp syn ack packet.
1792  *
1793  * Return: true if packet is tcp syn ack packet.
1794  *	   false otherwise.
1795  */
1796 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1797 {
1798 	uint8_t op_code;
1799 
1800 	op_code = (uint8_t)(*(uint8_t *)(data +
1801 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1802 
1803 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1804 		return true;
1805 	return false;
1806 }
1807 
1808 /**
1809  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1810  * @data: Pointer to network data buffer
1811  *
1812  * This api is for tcp ack packet.
1813  *
1814  * Return: true if packet is tcp ack packet.
1815  *	   false otherwise.
1816  */
1817 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1818 {
1819 	uint8_t op_code;
1820 
1821 	op_code = (uint8_t)(*(uint8_t *)(data +
1822 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1823 
1824 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1825 		return true;
1826 	return false;
1827 }
1828 
1829 /**
1830  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1831  * @data: Pointer to network data buffer
1832  *
1833  * This api is for tcp packet.
1834  *
1835  * Return: tcp source port value.
1836  */
1837 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1838 {
1839 	uint16_t src_port;
1840 
1841 	src_port = (uint16_t)(*(uint16_t *)(data +
1842 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1843 
1844 	return src_port;
1845 }
1846 
1847 /**
1848  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1849  * @data: Pointer to network data buffer
1850  *
1851  * This api is for tcp packet.
1852  *
1853  * Return: tcp destination port value.
1854  */
1855 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1856 {
1857 	uint16_t tgt_port;
1858 
1859 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1860 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1861 
1862 	return tgt_port;
1863 }
1864 
1865 /**
1866  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
1867  * @data: Pointer to network data buffer
1868  *
1869  * This api is for ipv4 req packet.
1870  *
1871  * Return: true if packet is icmpv4 request
1872  *	   false otherwise.
1873  */
1874 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1875 {
1876 	uint8_t op_code;
1877 
1878 	op_code = (uint8_t)(*(uint8_t *)(data +
1879 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1880 
1881 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1882 		return true;
1883 	return false;
1884 }
1885 
1886 /**
1887  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
1888  * @data: Pointer to network data buffer
1889  *
1890  * This api is for ipv4 res packet.
1891  *
1892  * Return: true if packet is icmpv4 response
1893  *	   false otherwise.
1894  */
1895 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1896 {
1897 	uint8_t op_code;
1898 
1899 	op_code = (uint8_t)(*(uint8_t *)(data +
1900 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1901 
1902 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1903 		return true;
1904 	return false;
1905 }
1906 
1907 /**
1908  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1909  * @data: Pointer to network data buffer
1910  *
1911  * This api is for ipv4 packet.
1912  *
1913  * Return: icmpv4 packet source IP value.
1914  */
1915 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1916 {
1917 	uint32_t src_ip;
1918 
1919 	src_ip = (uint32_t)(*(uint32_t *)(data +
1920 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1921 
1922 	return src_ip;
1923 }
1924 
1925 /**
1926  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1927  * @data: Pointer to network data buffer
1928  *
1929  * This api is for ipv4 packet.
1930  *
1931  * Return: icmpv4 packet target IP value.
1932  */
1933 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1934 {
1935 	uint32_t tgt_ip;
1936 
1937 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1938 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1939 
1940 	return tgt_ip;
1941 }
1942 
1943 
1944 /**
1945  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet.
1946  * @data: Pointer to IPV6 packet data buffer
1947  *
1948  * This func. checks whether it is an IPV6 packet or not.
1949  *
1950  * Return: TRUE if it is an IPV6 packet
1951  *         FALSE if not
1952  */
1953 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
1954 {
1955 	uint16_t ether_type;
1956 
1957 	ether_type = (uint16_t)(*(uint16_t *)(data +
1958 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1959 
1960 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1961 		return true;
1962 	else
1963 		return false;
1964 }
1965 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
1966 
1967 /**
1968  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
1969  * @data: Pointer to network data buffer
1970  *
1971  * This api is for ipv6 packet.
1972  *
1973  * Return: true if packet is DHCP packet
1974  *	   false otherwise
1975  */
1976 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
1977 {
1978 	uint16_t sport;
1979 	uint16_t dport;
1980 	uint8_t ipv6_offset;
1981 
1982 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
1983 	sport = *(uint16_t *)(data + ipv6_offset +
1984 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
1985 	dport = *(uint16_t *)(data + ipv6_offset +
1986 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
1987 			      sizeof(uint16_t));
1988 
1989 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
1990 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
1991 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
1992 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
1993 		return true;
1994 	else
1995 		return false;
1996 }
1997 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
1998 
1999 /**
2000  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2001  * @data: Pointer to network data buffer
2002  *
2003  * This API is for IPv6 packets.
2004  *
2005  * Return: true if packet is MDNS packet
2006  *	   false otherwise
2007  */
2008 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2009 {
2010 	uint16_t sport;
2011 	uint16_t dport;
2012 
2013 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2014 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2015 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2016 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2017 					sizeof(uint16_t));
2018 
2019 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2020 	    dport == sport)
2021 		return true;
2022 	else
2023 		return false;
2024 }
2025 
2026 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2027 
2028 /**
2029  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2030  * @data: Pointer to IPV4 packet data buffer
2031  *
2032  * This function checks whether it is an IPV4 multicast packet or not.
2033  *
2034  * Return: TRUE if it is an IPV4 multicast packet
2035  *         FALSE if not
2036  */
2037 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2038 {
2039 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2040 		uint32_t *dst_addr =
2041 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2042 
2043 		/*
2044 		 * Check the top nibble of the IPV4 destination address; if it
2045 		 * is 0xE, the address is a multicast IP.
2046 		 */
2047 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2048 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2049 			return true;
2050 		else
2051 			return false;
2052 	} else
2053 		return false;
2054 }
2055 
2056 /**
2057  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2058  * @data: Pointer to IPV6 packet data buffer
2059  *
2060  * This function checks whether it is an IPV6 multicast packet or not.
2061  *
2062  * Return: TRUE if it is an IPV6 multicast packet
2063  *         FALSE if not
2064  */
2065 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2066 {
2067 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2068 		uint16_t *dst_addr;
2069 
2070 		dst_addr = (uint16_t *)
2071 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2072 
2073 		/*
2074 		 * Check the first 16 bits of the destination address; if they
2075 		 * match 0xFF00, it is an IPV6 mcast packet.
2076 		 */
2077 		if (*dst_addr ==
2078 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2079 			return true;
2080 		else
2081 			return false;
2082 	} else
2083 		return false;
2084 }
2085 
2086 /**
2087  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2088  * @data: Pointer to IPV4 ICMP packet data buffer
2089  *
2090  * This function checks whether it is an ICMP packet or not.
2091  *
2092  * Return: TRUE if it is an ICMP packet
2093  *         FALSE if not
2094  */
2095 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2096 {
2097 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2098 		uint8_t pkt_type;
2099 
2100 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2101 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2102 
2103 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2104 			return true;
2105 		else
2106 			return false;
2107 	} else
2108 		return false;
2109 }
2110 
2111 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2112 
2113 /**
2114  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2115  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2116  *
2117  * This function checks whether it is an ICMPV6 packet or not.
2118  *
2119  * Return: TRUE if it is an ICMPV6 packet
2120  *         FALSE if not
2121  */
2122 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2123 {
2124 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2125 		uint8_t pkt_type;
2126 
2127 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2128 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2129 
2130 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2131 			return true;
2132 		else
2133 			return false;
2134 	} else
2135 		return false;
2136 }
2137 
2138 /**
2139  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2140  * @data: Pointer to IPV4 UDP packet data buffer
2141  *
2142  * This function checks whether it is an IPV4 UDP packet or not.
2143  *
2144  * Return: TRUE if it is an IPV4 UDP packet
2145  *         FALSE if not
2146  */
2147 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2148 {
2149 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2150 		uint8_t pkt_type;
2151 
2152 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2153 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2154 
2155 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2156 			return true;
2157 		else
2158 			return false;
2159 	} else
2160 		return false;
2161 }
2162 
2163 /**
2164  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2165  * @data: Pointer to IPV4 TCP packet data buffer
2166  *
2167  * This function checks whether it is an IPV4 TCP packet or not.
2168  *
2169  * Return: TRUE if it is an IPV4 TCP packet
2170  *         FALSE if not
2171  */
2172 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2173 {
2174 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2175 		uint8_t pkt_type;
2176 
2177 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2178 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2179 
2180 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2181 			return true;
2182 		else
2183 			return false;
2184 	} else
2185 		return false;
2186 }
2187 
2188 /**
2189  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2190  * @data: Pointer to IPV6 UDP packet data buffer
2191  *
2192  * This function checks whether it is an IPV6 UDP packet or not.
2193  *
2194  * Return: TRUE if it is an IPV6 UDP packet
2195  *         FALSE if not
2196  */
2197 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2198 {
2199 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2200 		uint8_t pkt_type;
2201 
2202 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2203 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2204 
2205 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2206 			return true;
2207 		else
2208 			return false;
2209 	} else
2210 		return false;
2211 }
2212 
2213 /**
2214  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2215  * @data: Pointer to IPV6 TCP packet data buffer
2216  *
2217  * This function checks whether it is an IPV6 TCP packet or not.
2218  *
2219  * Return: TRUE if it is an IPV6 TCP packet
2220  *         FALSE if not
2221  */
2222 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2223 {
2224 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2225 		uint8_t pkt_type;
2226 
2227 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2228 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2229 
2230 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2231 			return true;
2232 		else
2233 			return false;
2234 	} else
2235 		return false;
2236 }
2237 
2238 /**
2239  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2240  * @nbuf: sk buff
2241  *
2242  * Return: true if packet is broadcast
2243  *	   false otherwise
2244  */
2245 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2246 {
2247 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2248 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2249 }
2250 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
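/*
 * Illustrative sketch (not driver code): how a caller might combine the
 * classification helpers above to tag a frame before queuing it. The tag
 * names are hypothetical; the helpers all operate on the raw ethernet
 * frame starting at qdf_nbuf_data(nbuf).
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv6_dhcp_pkt(data) ||
 *	    __qdf_nbuf_data_is_icmp_pkt(data))
 *		tag = EXAMPLE_HIGH_PRIO_TAG;
 *	else if (__qdf_nbuf_data_is_ipv4_mcast_pkt(data) ||
 *		 __qdf_nbuf_data_is_ipv6_mcast_pkt(data) ||
 *		 __qdf_nbuf_is_bcast_pkt(nbuf))
 *		tag = EXAMPLE_MCAST_BCAST_TAG;
 */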
2251 
2252 #ifdef NBUF_MEMORY_DEBUG
2253 
2254 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2255 
2256 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2257 static struct kmem_cache *nbuf_tracking_cache;
2258 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2259 static spinlock_t qdf_net_buf_track_free_list_lock;
2260 static uint32_t qdf_net_buf_track_free_list_count;
2261 static uint32_t qdf_net_buf_track_used_list_count;
2262 static uint32_t qdf_net_buf_track_max_used;
2263 static uint32_t qdf_net_buf_track_max_free;
2264 static uint32_t qdf_net_buf_track_max_allocated;
2265 
2266 /**
2267  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2268  *
2269  * tracks the max number of network buffers that the wlan driver was tracking
2270  * at any one time.
2271  *
2272  * Return: none
2273  */
2274 static inline void update_max_used(void)
2275 {
2276 	int sum;
2277 
2278 	if (qdf_net_buf_track_max_used <
2279 	    qdf_net_buf_track_used_list_count)
2280 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2281 	sum = qdf_net_buf_track_free_list_count +
2282 		qdf_net_buf_track_used_list_count;
2283 	if (qdf_net_buf_track_max_allocated < sum)
2284 		qdf_net_buf_track_max_allocated = sum;
2285 }
2286 
2287 /**
2288  * update_max_free() - update qdf_net_buf_track_max_free
2289  *
2290  * tracks the max number of tracking buffers kept in the freelist.
2291  *
2292  * Return: none
2293  */
2294 static inline void update_max_free(void)
2295 {
2296 	if (qdf_net_buf_track_max_free <
2297 	    qdf_net_buf_track_free_list_count)
2298 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2299 }
2300 
2301 /**
2302  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2303  *
2304  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2305  * This function also adds flexibility to adjust the allocation and freelist
2306  * schemes.
2307  *
2308  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2309  */
2310 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2311 {
2312 	int flags = GFP_KERNEL;
2313 	unsigned long irq_flag;
2314 	QDF_NBUF_TRACK *new_node = NULL;
2315 
2316 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2317 	qdf_net_buf_track_used_list_count++;
2318 	if (qdf_net_buf_track_free_list) {
2319 		new_node = qdf_net_buf_track_free_list;
2320 		qdf_net_buf_track_free_list =
2321 			qdf_net_buf_track_free_list->p_next;
2322 		qdf_net_buf_track_free_list_count--;
2323 	}
2324 	update_max_used();
2325 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2326 
2327 	if (new_node)
2328 		return new_node;
2329 
2330 	if (in_interrupt() || irqs_disabled() || in_atomic())
2331 		flags = GFP_ATOMIC;
2332 
2333 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2334 }
2335 
2336 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2337 #define FREEQ_POOLSIZE 2048
2338 
2339 /**
2340  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2341  *
2342  * Matches calls to qdf_nbuf_track_alloc.
2343  * Either frees the tracking cookie to kernel or an internal
2344  * freelist based on the size of the freelist.
2345  *
2346  * Return: none
2347  */
2348 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2349 {
2350 	unsigned long irq_flag;
2351 
2352 	if (!node)
2353 		return;
2354 
2355 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE;
2356 	 * only shrink the freelist if it is bigger than twice the number of
2357 	 * nbufs in use. If the driver is stalling in a consistent bursty
2358 	 * fashion, this will keep serving 3/4 of the allocations from the
2359 	 * free list while also allowing the system to recover memory as less
2360 	 * frantic traffic occurs.
2361 	 */
2362 
2363 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2364 
2365 	qdf_net_buf_track_used_list_count--;
2366 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2367 	   (qdf_net_buf_track_free_list_count >
2368 	    qdf_net_buf_track_used_list_count << 1)) {
2369 		kmem_cache_free(nbuf_tracking_cache, node);
2370 	} else {
2371 		node->p_next = qdf_net_buf_track_free_list;
2372 		qdf_net_buf_track_free_list = node;
2373 		qdf_net_buf_track_free_list_count++;
2374 	}
2375 	update_max_free();
2376 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2377 }
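/*
 * Worked example of the shrink heuristic above (illustrative numbers):
 * with FREEQ_POOLSIZE = 2048, a node is returned to the kmem_cache only
 * when free_list_count > 2048 AND free_list_count > 2 * used_list_count.
 * E.g. free = 3000, used = 1000: 3000 > 2048 and 3000 > 2000, so the node
 * is freed back to the kernel; free = 3000, used = 2000: 3000 > 2048 but
 * 3000 <= 4000, so the node is kept on the freelist.
 */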
2378 
2379 /**
2380  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2381  *
2382  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2383  * the freelist first makes it performant for the first iperf udp burst
2384  * as well as steady state.
2385  *
2386  * Return: None
2387  */
2388 static void qdf_nbuf_track_prefill(void)
2389 {
2390 	int i;
2391 	QDF_NBUF_TRACK *node, *head;
2392 
2393 	/* prepopulate the freelist */
2394 	head = NULL;
2395 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2396 		node = qdf_nbuf_track_alloc();
2397 		if (!node)
2398 			continue;
2399 		node->p_next = head;
2400 		head = node;
2401 	}
2402 	while (head) {
2403 		node = head->p_next;
2404 		qdf_nbuf_track_free(head);
2405 		head = node;
2406 	}
2407 
2408 	/* prefilled buffers should not count as used */
2409 	qdf_net_buf_track_max_used = 0;
2410 }
2411 
2412 /**
2413  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2414  *
2415  * This initializes the memory manager for the nbuf tracking cookies.  Because
2416  * these cookies are all the same size and only used in this feature, we can
2417  * use a kmem_cache to provide tracking as well as to speed up allocations.
2418  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2419  * features) a freelist is prepopulated here.
2420  *
2421  * Return: None
2422  */
2423 static void qdf_nbuf_track_memory_manager_create(void)
2424 {
2425 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2426 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2427 						sizeof(QDF_NBUF_TRACK),
2428 						0, 0, NULL);
2429 
2430 	qdf_nbuf_track_prefill();
2431 }
2432 
2433 /**
2434  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2435  *
2436  * Empty the freelist and print out usage statistics when it is no longer
2437  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2438  * any nbuf tracking cookies were leaked.
2439  *
2440  * Return: None
2441  */
2442 static void qdf_nbuf_track_memory_manager_destroy(void)
2443 {
2444 	QDF_NBUF_TRACK *node, *tmp;
2445 	unsigned long irq_flag;
2446 
2447 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2448 	node = qdf_net_buf_track_free_list;
2449 
2450 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2451 		qdf_print("%s: unexpectedly large max_used count %d",
2452 			  __func__, qdf_net_buf_track_max_used);
2453 
2454 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2455 		qdf_print("%s: %d unused trackers were allocated",
2456 			  __func__,
2457 			  qdf_net_buf_track_max_allocated -
2458 			  qdf_net_buf_track_max_used);
2459 
2460 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2461 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2462 		qdf_print("%s: check freelist shrinking functionality",
2463 			  __func__);
2464 
2465 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2466 		  "%s: %d residual freelist size",
2467 		  __func__, qdf_net_buf_track_free_list_count);
2468 
2469 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2470 		  "%s: %d max freelist size observed",
2471 		  __func__, qdf_net_buf_track_max_free);
2472 
2473 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2474 		  "%s: %d max buffers used observed",
2475 		  __func__, qdf_net_buf_track_max_used);
2476 
2477 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2478 		  "%s: %d max buffers allocated observed",
2479 		  __func__, qdf_net_buf_track_max_allocated);
2480 
2481 	while (node) {
2482 		tmp = node;
2483 		node = node->p_next;
2484 		kmem_cache_free(nbuf_tracking_cache, tmp);
2485 		qdf_net_buf_track_free_list_count--;
2486 	}
2487 
2488 	if (qdf_net_buf_track_free_list_count != 0)
2489 		qdf_info("%d unfreed tracking memory lost in freelist",
2490 			 qdf_net_buf_track_free_list_count);
2491 
2492 	if (qdf_net_buf_track_used_list_count != 0)
2493 		qdf_info("%d unfreed tracking memory still in use",
2494 			 qdf_net_buf_track_used_list_count);
2495 
2496 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2497 	kmem_cache_destroy(nbuf_tracking_cache);
2498 	qdf_net_buf_track_free_list = NULL;
2499 }
2500 
2501 /**
2502  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2503  *
2504  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2505  * in a hash table, and when the driver is unloaded it reports leaked SKBs.
2506  * A WLAN driver module whose allocated SKB is freed by the network stack is
2507  * supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
2508  * reported as a memory leak.
2509  *
2510  * Return: none
2511  */
2512 void qdf_net_buf_debug_init(void)
2513 {
2514 	uint32_t i;
2515 
2516 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2517 
2518 	if (is_initial_mem_debug_disabled)
2519 		return;
2520 
2521 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2522 
2523 	qdf_nbuf_map_tracking_init();
2524 	qdf_nbuf_track_memory_manager_create();
2525 
2526 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2527 		gp_qdf_net_buf_track_tbl[i] = NULL;
2528 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2529 	}
2530 }
2531 qdf_export_symbol(qdf_net_buf_debug_init);
2532 
2533 /**
2534  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2535  *
2536  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2537  * As part of exiting the functionality, free the leaked memory and
2538  * cleanup the tracking buffers.
2539  *
2540  * Return: none
2541  */
2542 void qdf_net_buf_debug_exit(void)
2543 {
2544 	uint32_t i;
2545 	uint32_t count = 0;
2546 	unsigned long irq_flag;
2547 	QDF_NBUF_TRACK *p_node;
2548 	QDF_NBUF_TRACK *p_prev;
2549 
2550 	if (is_initial_mem_debug_disabled)
2551 		return;
2552 
2553 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2554 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2555 		p_node = gp_qdf_net_buf_track_tbl[i];
2556 		while (p_node) {
2557 			p_prev = p_node;
2558 			p_node = p_node->p_next;
2559 			count++;
2560 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2561 				 p_prev->func_name, p_prev->line_num,
2562 				 p_prev->size, p_prev->net_buf);
2563 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2564 				 p_prev->map_func_name,
2565 				 p_prev->map_line_num,
2566 				 p_prev->unmap_func_name,
2567 				 p_prev->unmap_line_num,
2568 				 p_prev->is_nbuf_mapped);
2569 			qdf_nbuf_track_free(p_prev);
2570 		}
2571 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2572 	}
2573 
2574 	qdf_nbuf_track_memory_manager_destroy();
2575 	qdf_nbuf_map_tracking_deinit();
2576 
2577 #ifdef CONFIG_HALT_KMEMLEAK
2578 	if (count) {
2579 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2580 		QDF_BUG(0);
2581 	}
2582 #endif
2583 }
2584 qdf_export_symbol(qdf_net_buf_debug_exit);
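/*
 * Illustrative pairing (sketch, not driver code): the tracker is expected
 * to be brought up once at driver load and torn down at unload so that
 * leaks are reported exactly once per load cycle. The surrounding handler
 * names are left open because they are owned by the caller:
 *
 *	qdf_net_buf_debug_init();	// from the driver/module load path
 *	...
 *	qdf_net_buf_debug_exit();	// from the matching unload path
 */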
2585 
2586 /**
2587  * qdf_net_buf_debug_hash() - hash network buffer pointer
2588  *
2589  * Return: hash value
2590  */
2591 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2592 {
2593 	uint32_t i;
2594 
2595 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2596 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2597 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2598 
2599 	return i;
2600 }
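/*
 * Worked example of the hash above (illustrative pointer value): for
 * net_buf = 0xffff888012345678 the two shifted terms (>> 4 and >> 14) are
 * summed and then masked with (QDF_NET_BUF_TRACK_MAX_SIZE - 1), so buffers
 * carved from the same slab still spread across buckets. The mask only
 * behaves like a modulo because QDF_NET_BUF_TRACK_MAX_SIZE is assumed to
 * be a power of two.
 */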
2601 
2602 /**
2603  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2604  *
2605  * Return: If the skb is found in the hash table, return a pointer to its
2606  *	tracking node, else return %NULL
2607  */
2608 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2609 {
2610 	uint32_t i;
2611 	QDF_NBUF_TRACK *p_node;
2612 
2613 	i = qdf_net_buf_debug_hash(net_buf);
2614 	p_node = gp_qdf_net_buf_track_tbl[i];
2615 
2616 	while (p_node) {
2617 		if (p_node->net_buf == net_buf)
2618 			return p_node;
2619 		p_node = p_node->p_next;
2620 	}
2621 
2622 	return NULL;
2623 }
2624 
2625 /**
2626  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2627  *
2628  * Return: none
2629  */
2630 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2631 				const char *func_name, uint32_t line_num)
2632 {
2633 	uint32_t i;
2634 	unsigned long irq_flag;
2635 	QDF_NBUF_TRACK *p_node;
2636 	QDF_NBUF_TRACK *new_node;
2637 
2638 	if (is_initial_mem_debug_disabled)
2639 		return;
2640 
2641 	new_node = qdf_nbuf_track_alloc();
2642 
2643 	i = qdf_net_buf_debug_hash(net_buf);
2644 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2645 
2646 	p_node = qdf_net_buf_debug_look_up(net_buf);
2647 
2648 	if (p_node) {
2649 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2650 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2651 			  net_buf, func_name, line_num);
2652 		qdf_nbuf_track_free(new_node);
2653 	} else {
2654 		p_node = new_node;
2655 		if (p_node) {
2656 			p_node->net_buf = net_buf;
2657 			qdf_str_lcopy(p_node->func_name, func_name,
2658 				      QDF_MEM_FUNC_NAME_SIZE);
2659 			p_node->line_num = line_num;
2660 			p_node->is_nbuf_mapped = false;
2661 			p_node->map_line_num = 0;
2662 			p_node->unmap_line_num = 0;
2663 			p_node->map_func_name[0] = '\0';
2664 			p_node->unmap_func_name[0] = '\0';
2665 			p_node->size = size;
2666 			p_node->time = qdf_get_log_timestamp();
2667 			qdf_mem_skb_inc(size);
2668 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2669 			gp_qdf_net_buf_track_tbl[i] = p_node;
2670 		} else
2671 			qdf_print(
2672 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2673 				  func_name, line_num, size);
2674 	}
2675 
2676 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2677 }
2678 qdf_export_symbol(qdf_net_buf_debug_add_node);
2679 
2680 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2681 				   uint32_t line_num)
2682 {
2683 	uint32_t i;
2684 	unsigned long irq_flag;
2685 	QDF_NBUF_TRACK *p_node;
2686 
2687 	if (is_initial_mem_debug_disabled)
2688 		return;
2689 
2690 	i = qdf_net_buf_debug_hash(net_buf);
2691 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2692 
2693 	p_node = qdf_net_buf_debug_look_up(net_buf);
2694 
2695 	if (p_node) {
2696 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2697 			      QDF_MEM_FUNC_NAME_SIZE);
2698 		p_node->line_num = line_num;
2699 	}
2700 
2701 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2702 }
2703 
2704 qdf_export_symbol(qdf_net_buf_debug_update_node);
2705 
2706 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2707 				       const char *func_name,
2708 				       uint32_t line_num)
2709 {
2710 	uint32_t i;
2711 	unsigned long irq_flag;
2712 	QDF_NBUF_TRACK *p_node;
2713 
2714 	if (is_initial_mem_debug_disabled)
2715 		return;
2716 
2717 	i = qdf_net_buf_debug_hash(net_buf);
2718 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2719 
2720 	p_node = qdf_net_buf_debug_look_up(net_buf);
2721 
2722 	if (p_node) {
2723 		qdf_str_lcopy(p_node->map_func_name, func_name,
2724 			      QDF_MEM_FUNC_NAME_SIZE);
2725 		p_node->map_line_num = line_num;
2726 		p_node->is_nbuf_mapped = true;
2727 	}
2728 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2729 }
2730 
2731 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
2732 					 const char *func_name,
2733 					 uint32_t line_num)
2734 {
2735 	uint32_t i;
2736 	unsigned long irq_flag;
2737 	QDF_NBUF_TRACK *p_node;
2738 
2739 	if (is_initial_mem_debug_disabled)
2740 		return;
2741 
2742 	i = qdf_net_buf_debug_hash(net_buf);
2743 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2744 
2745 	p_node = qdf_net_buf_debug_look_up(net_buf);
2746 
2747 	if (p_node) {
2748 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
2749 			      QDF_MEM_FUNC_NAME_SIZE);
2750 		p_node->unmap_line_num = line_num;
2751 		p_node->is_nbuf_mapped = false;
2752 	}
2753 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2754 }
2755 
2756 /**
2757  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2758  *
2759  * Return: none
2760  */
2761 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2762 {
2763 	uint32_t i;
2764 	QDF_NBUF_TRACK *p_head;
2765 	QDF_NBUF_TRACK *p_node = NULL;
2766 	unsigned long irq_flag;
2767 	QDF_NBUF_TRACK *p_prev;
2768 
2769 	if (is_initial_mem_debug_disabled)
2770 		return;
2771 
2772 	i = qdf_net_buf_debug_hash(net_buf);
2773 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2774 
2775 	p_head = gp_qdf_net_buf_track_tbl[i];
2776 
2777 	/* Unallocated SKB */
2778 	if (!p_head)
2779 		goto done;
2780 
2781 	p_node = p_head;
2782 	/* Found at head of the table */
2783 	if (p_head->net_buf == net_buf) {
2784 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2785 		goto done;
2786 	}
2787 
2788 	/* Search in collision list */
2789 	while (p_node) {
2790 		p_prev = p_node;
2791 		p_node = p_node->p_next;
2792 		if ((p_node) && (p_node->net_buf == net_buf)) {
2793 			p_prev->p_next = p_node->p_next;
2794 			break;
2795 		}
2796 	}
2797 
2798 done:
2799 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2800 
2801 	if (p_node) {
2802 		qdf_mem_skb_dec(p_node->size);
2803 		qdf_nbuf_track_free(p_node);
2804 	} else {
2805 		QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
2806 				   net_buf);
2807 	}
2808 }
2809 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2810 
2811 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2812 				   const char *func_name, uint32_t line_num)
2813 {
2814 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2815 
2816 	if (is_initial_mem_debug_disabled)
2817 		return;
2818 
2819 	while (ext_list) {
2820 		/*
2821 		 * Take care to add an entry for each buffer if this is a jumbo
2822 		 * packet chained via frag_list
2823 		 */
2824 		qdf_nbuf_t next;
2825 
2826 		next = qdf_nbuf_queue_next(ext_list);
2827 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2828 		ext_list = next;
2829 	}
2830 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2831 }
2832 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2833 
2834 /**
2835  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2836  * @net_buf: Network buf holding head segment (single)
2837  *
2838  * A WLAN driver module whose allocated SKB is freed by the network stack is
2839  * supposed to call this API before handing the SKB to the network stack so
2840  * that the SKB is not reported as a memory leak.
2841  *
2842  * Return: none
2843  */
2844 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2845 {
2846 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2847 
2848 	if (is_initial_mem_debug_disabled)
2849 		return;
2850 
2851 	while (ext_list) {
2852 		/*
2853 		 * Take care to free each buffer if this is a jumbo packet
2854 		 * chained via frag_list
2855 		 */
2856 		qdf_nbuf_t next;
2857 
2858 		next = qdf_nbuf_queue_next(ext_list);
2859 
2860 		if (qdf_nbuf_get_users(ext_list) > 1) {
2861 			ext_list = next;
2862 			continue;
2863 		}
2864 
2865 		qdf_net_buf_debug_delete_node(ext_list);
2866 		ext_list = next;
2867 	}
2868 
2869 	if (qdf_nbuf_get_users(net_buf) > 1)
2870 		return;
2871 
2872 	qdf_net_buf_debug_delete_node(net_buf);
2873 }
2874 qdf_export_symbol(qdf_net_buf_debug_release_skb);
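/*
 * Illustrative usage of the acquire/release pair (sketch, not driver
 * code): an skb the driver allocated and is about to hand to the network
 * stack must drop its tracking entry first, while an skb received from
 * the stack is added to the tracker before the driver starts using it.
 * netif_rx_ni() is only an example stack entry point; qdf_nbuf_t is
 * struct sk_buff * in this Linux implementation.
 *
 *	// driver-allocated skb about to be handed to the network stack
 *	qdf_net_buf_debug_release_skb(nbuf);
 *	netif_rx_ni(nbuf);
 *
 *	// skb handed to the driver by the network stack (e.g. xmit path)
 *	qdf_net_buf_debug_acquire_skb(nbuf, __func__, __LINE__);
 */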
2875 
2876 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2877 				int reserve, int align, int prio,
2878 				const char *func, uint32_t line)
2879 {
2880 	qdf_nbuf_t nbuf;
2881 
2882 	if (is_initial_mem_debug_disabled)
2883 		return __qdf_nbuf_alloc(osdev, size,
2884 					reserve, align,
2885 					prio, func, line);
2886 
2887 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2888 
2889 	/* Store SKB in internal QDF tracking table */
2890 	if (qdf_likely(nbuf)) {
2891 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2892 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2893 	} else {
2894 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2895 	}
2896 
2897 	return nbuf;
2898 }
2899 qdf_export_symbol(qdf_nbuf_alloc_debug);
2900 
2901 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
2902 					    const char *func, uint32_t line)
2903 {
2904 	qdf_nbuf_t nbuf;
2905 
2906 	if (is_initial_mem_debug_disabled)
2907 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
2908 						    line);
2909 
2910 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
2911 
2912 	/* Store SKB in internal QDF tracking table */
2913 	if (qdf_likely(nbuf)) {
2914 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2915 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2916 	} else {
2917 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2918 	}
2919 
2920 	return nbuf;
2921 }
2922 
2923 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
2924 
2925 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2926 {
2927 	qdf_nbuf_t ext_list;
2928 	qdf_frag_t p_frag;
2929 	uint32_t num_nr_frags;
2930 	uint32_t idx = 0;
2931 
2932 	if (qdf_unlikely(!nbuf))
2933 		return;
2934 
2935 	if (is_initial_mem_debug_disabled)
2936 		goto free_buf;
2937 
2938 	if (qdf_nbuf_get_users(nbuf) > 1)
2939 		goto free_buf;
2940 
2941 	/* Remove SKB from internal QDF tracking table */
2942 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2943 	qdf_net_buf_debug_delete_node(nbuf);
2944 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2945 
2946 	/* Take care to delete the debug entries for frags */
2947 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
2948 
2949 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
2950 
2951 	while (idx < num_nr_frags) {
2952 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
2953 		if (qdf_likely(p_frag))
2954 			qdf_frag_debug_refcount_dec(p_frag, func, line);
2955 		idx++;
2956 	}
2957 
2958 	/*
2959 	 * Take care to update the debug entries for frag_list and also
2960 	 * for the frags attached to frag_list
2961 	 */
2962 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2963 	while (ext_list) {
2964 		if (qdf_nbuf_get_users(ext_list) == 1) {
2965 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
2966 			idx = 0;
2967 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
2968 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
2969 			while (idx < num_nr_frags) {
2970 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
2971 				if (qdf_likely(p_frag))
2972 					qdf_frag_debug_refcount_dec(p_frag,
2973 								    func, line);
2974 				idx++;
2975 			}
2976 			qdf_net_buf_debug_delete_node(ext_list);
2977 		}
2978 
2979 		ext_list = qdf_nbuf_queue_next(ext_list);
2980 	}
2981 
2982 free_buf:
2983 	__qdf_nbuf_free(nbuf);
2984 }
2985 qdf_export_symbol(qdf_nbuf_free_debug);
2986 
2987 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
2988 {
2989 	uint32_t num_nr_frags;
2990 	uint32_t idx = 0;
2991 	qdf_nbuf_t ext_list;
2992 	qdf_frag_t p_frag;
2993 
2994 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
2995 
2996 	if (is_initial_mem_debug_disabled)
2997 		return cloned_buf;
2998 
2999 	if (qdf_unlikely(!cloned_buf))
3000 		return NULL;
3001 
3002 	/* Take care to update the debug entries for frags */
3003 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3004 
3005 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3006 
3007 	while (idx < num_nr_frags) {
3008 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3009 		if (qdf_likely(p_frag))
3010 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3011 		idx++;
3012 	}
3013 
3014 	/* Take care to update debug entries for frags attached to frag_list */
3015 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3016 	while (ext_list) {
3017 		idx = 0;
3018 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3019 
3020 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3021 
3022 		while (idx < num_nr_frags) {
3023 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3024 			if (qdf_likely(p_frag))
3025 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3026 			idx++;
3027 		}
3028 		ext_list = qdf_nbuf_queue_next(ext_list);
3029 	}
3030 
3031 	/* Store SKB in internal QDF tracking table */
3032 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3033 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3034 
3035 	return cloned_buf;
3036 }
3037 qdf_export_symbol(qdf_nbuf_clone_debug);
3038 
3039 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3040 {
3041 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3042 
3043 	if (is_initial_mem_debug_disabled)
3044 		return copied_buf;
3045 
3046 	if (qdf_unlikely(!copied_buf))
3047 		return NULL;
3048 
3049 	/* Store SKB in internal QDF tracking table */
3050 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3051 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3052 
3053 	return copied_buf;
3054 }
3055 qdf_export_symbol(qdf_nbuf_copy_debug);
3056 
3057 qdf_nbuf_t
3058 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3059 			   const char *func, uint32_t line)
3060 {
3061 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3062 
3063 	if (qdf_unlikely(!copied_buf))
3064 		return NULL;
3065 
3066 	if (is_initial_mem_debug_disabled)
3067 		return copied_buf;
3068 
3069 	/* Store SKB in internal QDF tracking table */
3070 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3071 	qdf_nbuf_history_add(copied_buf, func, line,
3072 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3073 
3074 	return copied_buf;
3075 }
3076 
3077 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3078 
3079 qdf_nbuf_t
3080 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3081 		       uint32_t line_num)
3082 {
3083 	qdf_nbuf_t unshared_buf;
3084 	qdf_frag_t p_frag;
3085 	uint32_t num_nr_frags;
3086 	uint32_t idx = 0;
3087 
3088 	if (is_initial_mem_debug_disabled)
3089 		return __qdf_nbuf_unshare(buf);
3090 
3091 	/* Take care to delete the debug entries for frags */
3092 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3093 
3094 	while (idx < num_nr_frags) {
3095 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3096 		if (qdf_likely(p_frag))
3097 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3098 						    line_num);
3099 		idx++;
3100 	}
3101 
3102 	unshared_buf = __qdf_nbuf_unshare(buf);
3103 
3104 	if (qdf_likely(buf != unshared_buf)) {
3105 		qdf_net_buf_debug_delete_node(buf);
3106 
3107 		if (unshared_buf)
3108 			qdf_net_buf_debug_add_node(unshared_buf, 0,
3109 						   func_name, line_num);
3110 	}
3111 
3112 	if (unshared_buf) {
3113 		/* Take care to add the debug entries for frags */
3114 		num_nr_frags = qdf_nbuf_get_nr_frags(unshared_buf);
3115 
3116 		idx = 0;
3117 		while (idx < num_nr_frags) {
3118 			p_frag = qdf_nbuf_get_frag_addr(unshared_buf, idx);
3119 			if (qdf_likely(p_frag))
3120 				qdf_frag_debug_refcount_inc(p_frag, func_name,
3121 							    line_num);
3122 			idx++;
3123 		}
3124 	}
3125 
3126 	return unshared_buf;
3127 }
3128 
3129 qdf_export_symbol(qdf_nbuf_unshare_debug);
3130 
3131 #endif /* NBUF_MEMORY_DEBUG */
3132 
3133 #if defined(FEATURE_TSO)
3134 
3135 /**
3136  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3137  *
3138  * @ethproto: ethernet type of the msdu
3139  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3140  * @l2_len: L2 length for the msdu
3141  * @eit_hdr: pointer to EIT header
3142  * @eit_hdr_len: EIT header length for the msdu
3143  * @eit_hdr_dma_map_addr: dma addr for EIT header
3144  * @tcphdr: pointer to tcp header
3145  * @ipv4_csum_en: ipv4 checksum enable
3146  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3147  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3148  * @ip_id: IP id
3149  * @tcp_seq_num: TCP sequence number
3150  *
3151  * This structure holds the TSO common info that is common
3152  * across all the TCP segments of the jumbo packet.
3153  */
3154 struct qdf_tso_cmn_seg_info_t {
3155 	uint16_t ethproto;
3156 	uint16_t ip_tcp_hdr_len;
3157 	uint16_t l2_len;
3158 	uint8_t *eit_hdr;
3159 	uint32_t eit_hdr_len;
3160 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3161 	struct tcphdr *tcphdr;
3162 	uint16_t ipv4_csum_en;
3163 	uint16_t tcp_ipv4_csum_en;
3164 	uint16_t tcp_ipv6_csum_en;
3165 	uint16_t ip_id;
3166 	uint32_t tcp_seq_num;
3167 };
3168 
3169 /**
3170  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
3171  *
3172  * @skb: network buffer
3173  *
3174  * Return: byte offset applied to the headers for 8-byte alignment (0 if none).
3175  */
3176 #ifdef FIX_TXDMA_LIMITATION
3177 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3178 {
3179 	uint32_t eit_hdr_len;
3180 	uint8_t *eit_hdr;
3181 	uint8_t byte_8_align_offset;
3182 
3183 	eit_hdr = skb->data;
3184 	eit_hdr_len = (skb_transport_header(skb)
3185 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3186 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3187 	if (qdf_unlikely(byte_8_align_offset)) {
3188 		TSO_DEBUG("%pK,Len %d %d",
3189 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3190 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3191 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3192 				  __LINE__, skb->head, skb->data,
3193 				 byte_8_align_offset);
3194 			return 0;
3195 		}
3196 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3197 		qdf_mem_move(skb->data,
3198 			     skb->data + byte_8_align_offset,
3199 			     eit_hdr_len);
3200 		skb->len -= byte_8_align_offset;
3201 		skb->mac_header -= byte_8_align_offset;
3202 		skb->network_header -= byte_8_align_offset;
3203 		skb->transport_header -= byte_8_align_offset;
3204 	}
3205 	return byte_8_align_offset;
3206 }
3207 #else
3208 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3209 {
3210 	return 0;
3211 }
3212 #endif
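/*
 * Worked example for qdf_nbuf_adj_tso_frag() (illustrative addresses):
 * with skb->data at ...0x1002 and a 54-byte ethernet + IP + TCP header the
 * header ends at ...0x1038, (0x1038 & 0x7) == 0, so no adjustment is made.
 * With a 66-byte header the end is ...0x1044, (0x1044 & 0x7) == 4, so the
 * headers are moved 4 bytes towards the skb head (headroom permitting) so
 * that the EIT header fragment ends on an 8-byte boundary, working around
 * the TXDMA limitation this path is compiled for.
 */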
3213 
3214 /**
3215  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3216  * information
3217  * @osdev: qdf device handle
3218  * @skb: skb buffer
3219  * @tso_info: Parameters common to all segments
3220  *
3221  * Get the TSO information that is common across all the TCP
3222  * segments of the jumbo packet
3223  *
3224  * Return: 0 - success 1 - failure
3225  */
3226 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3227 			struct sk_buff *skb,
3228 			struct qdf_tso_cmn_seg_info_t *tso_info)
3229 {
3230 	/* Get ethernet type and ethernet header length */
3231 	tso_info->ethproto = vlan_get_protocol(skb);
3232 
3233 	/* Determine whether this is an IPv4 or IPv6 packet */
3234 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3235 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3236 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3237 
3238 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3239 		tso_info->ipv4_csum_en = 1;
3240 		tso_info->tcp_ipv4_csum_en = 1;
3241 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3242 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3243 				ipv4_hdr->protocol);
3244 			return 1;
3245 		}
3246 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3247 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3248 		tso_info->tcp_ipv6_csum_en = 1;
3249 	} else {
3250 		qdf_err("TSO: ethertype 0x%x is not supported!",
3251 			tso_info->ethproto);
3252 		return 1;
3253 	}
3254 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3255 	tso_info->tcphdr = tcp_hdr(skb);
3256 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3257 	/* get pointer to the ethernet + IP + TCP header and their length */
3258 	tso_info->eit_hdr = skb->data;
3259 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3260 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3261 	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
3262 							tso_info->eit_hdr,
3263 							tso_info->eit_hdr_len,
3264 							DMA_TO_DEVICE);
3265 	if (unlikely(dma_mapping_error(osdev->dev,
3266 				       tso_info->eit_hdr_dma_map_addr))) {
3267 		qdf_err("DMA mapping error!");
3268 		qdf_assert(0);
3269 		return 1;
3270 	}
3271 
3272 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3273 		/* include IPv4 header length for IPV4 (total length) */
3274 		tso_info->ip_tcp_hdr_len =
3275 			tso_info->eit_hdr_len - tso_info->l2_len;
3276 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3277 		/* exclude IPv6 header length for IPv6 (payload length) */
3278 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3279 	}
3280 	/*
3281 	 * The length of the payload (application layer data) is added to
3282 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3283 	 * descriptor.
3284 	 */
3285 
3286 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3287 		tso_info->tcp_seq_num,
3288 		tso_info->eit_hdr_len,
3289 		tso_info->l2_len,
3290 		skb->len);
3291 	return 0;
3292 }
3293 
3294 
3295 /**
3296  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3297  *
3298  * @curr_seg: Segment whose contents are initialized
3299  * @tso_cmn_info: Parameters common to all segments
3300  *
3301  * Return: None
3302  */
3303 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3304 				struct qdf_tso_seg_elem_t *curr_seg,
3305 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3306 {
3307 	/* Initialize the flags to 0 */
3308 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3309 
3310 	/*
3311 	 * The following fields remain the same across all segments of
3312 	 * a jumbo packet
3313 	 */
3314 	curr_seg->seg.tso_flags.tso_enable = 1;
3315 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3316 		tso_cmn_info->ipv4_csum_en;
3317 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3318 		tso_cmn_info->tcp_ipv6_csum_en;
3319 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3320 		tso_cmn_info->tcp_ipv4_csum_en;
3321 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3322 
3323 	/* The following fields change for the segments */
3324 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3325 	tso_cmn_info->ip_id++;
3326 
3327 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3328 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3329 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3330 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3331 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3332 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3333 
3334 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3335 
3336 	/*
3337 	 * First fragment for each segment always contains the ethernet,
3338 	 * IP and TCP header
3339 	 */
3340 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3341 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3342 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3343 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3344 
3345 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3346 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3347 		   tso_cmn_info->eit_hdr_len,
3348 		   curr_seg->seg.tso_flags.tcp_seq_num,
3349 		   curr_seg->seg.total_len);
3350 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3351 }
3352 
3353 /**
3354  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3355  * into segments
3356  * @skb: network buffer to be segmented
3357  * @tso_info: This is the output. The information about the
3358  *           TSO segments will be populated within this.
3359  *
3360  * This function fragments a TCP jumbo packet into smaller
3361  * segments to be transmitted by the driver. It chains the TSO
3362  * segments created into a list.
3363  *
3364  * Return: number of TSO segments
3365  */
3366 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3367 		struct qdf_tso_info_t *tso_info)
3368 {
3369 	/* common across all segments */
3370 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3371 	/* segment specific */
3372 	void *tso_frag_vaddr;
3373 	qdf_dma_addr_t tso_frag_paddr = 0;
3374 	uint32_t num_seg = 0;
3375 	struct qdf_tso_seg_elem_t *curr_seg;
3376 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3377 	skb_frag_t *frag = NULL;
3378 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3379 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3380 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3381 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3382 	int j = 0; /* skb fragment index */
3383 	uint8_t byte_8_align_offset;
3384 
3385 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3386 	total_num_seg = tso_info->tso_num_seg_list;
3387 	curr_seg = tso_info->tso_seg_list;
3388 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3389 
3390 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3391 
3392 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3393 						skb, &tso_cmn_info))) {
3394 		qdf_warn("TSO: error getting common segment info");
3395 		return 0;
3396 	}
3397 
3398 	/* length of the first chunk of data in the skb */
3399 	skb_frag_len = skb_headlen(skb);
3400 
3401 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3402 	/* update the remaining skb fragment length and TSO segment length */
3403 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3404 	skb_proc -= tso_cmn_info.eit_hdr_len;
3405 
3406 	/* get the address to the next tso fragment */
3407 	tso_frag_vaddr = skb->data +
3408 			 tso_cmn_info.eit_hdr_len +
3409 			 byte_8_align_offset;
3410 	/* get the length of the next tso fragment */
3411 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3412 
3413 	if (tso_frag_len != 0) {
3414 		tso_frag_paddr = dma_map_single(osdev->dev,
3415 				tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
3416 	}
3417 
3418 	if (unlikely(dma_mapping_error(osdev->dev,
3419 					tso_frag_paddr))) {
3420 		qdf_err("DMA mapping error!");
3421 		qdf_assert_always(0);
3422 		return 0;
3423 	}
3424 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3425 		__LINE__, skb_frag_len, tso_frag_len);
3426 	num_seg = tso_info->num_segs;
3427 	tso_info->num_segs = 0;
3428 	tso_info->is_tso = 1;
3429 
3430 	while (num_seg && curr_seg) {
3431 		int i = 1; /* tso fragment index */
3432 		uint8_t more_tso_frags = 1;
3433 
3434 		curr_seg->seg.num_frags = 0;
3435 		tso_info->num_segs++;
3436 		total_num_seg->num_seg.tso_cmn_num_seg++;
3437 
3438 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3439 						 &tso_cmn_info);
3440 
3441 		/* If TCP PSH flag is set, set it in the last or only segment */
3442 		if (num_seg == 1)
3443 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3444 
3445 		if (unlikely(skb_proc == 0))
3446 			return tso_info->num_segs;
3447 
3448 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3449 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3450 		/* frag len is added to ip_len in while loop below*/
3451 
3452 		curr_seg->seg.num_frags++;
3453 
3454 		while (more_tso_frags) {
3455 			if (tso_frag_len != 0) {
3456 				curr_seg->seg.tso_frags[i].vaddr =
3457 					tso_frag_vaddr;
3458 				curr_seg->seg.tso_frags[i].length =
3459 					tso_frag_len;
3460 				curr_seg->seg.total_len += tso_frag_len;
3461 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3462 				curr_seg->seg.num_frags++;
3463 				skb_proc = skb_proc - tso_frag_len;
3464 
3465 				/* increment the TCP sequence number */
3466 
3467 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3468 				curr_seg->seg.tso_frags[i].paddr =
3469 					tso_frag_paddr;
3470 
3471 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
3472 			}
3473 
3474 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3475 					__func__, __LINE__,
3476 					i,
3477 					tso_frag_len,
3478 					curr_seg->seg.total_len,
3479 					curr_seg->seg.tso_frags[i].vaddr);
3480 
3481 			/* if there is no more data left in the skb */
3482 			if (!skb_proc)
3483 				return tso_info->num_segs;
3484 
3485 			/* get the next payload fragment information */
3486 			/* check if there are more fragments in this segment */
3487 			if (tso_frag_len < tso_seg_size) {
3488 				more_tso_frags = 1;
3489 				if (tso_frag_len != 0) {
3490 					tso_seg_size = tso_seg_size -
3491 						tso_frag_len;
3492 					i++;
3493 					if (curr_seg->seg.num_frags ==
3494 								FRAG_NUM_MAX) {
3495 						more_tso_frags = 0;
3496 						/*
3497 						 * reset i and the tso
3498 						 * payload size
3499 						 */
3500 						i = 1;
3501 						tso_seg_size =
3502 							skb_shinfo(skb)->
3503 								gso_size;
3504 					}
3505 				}
3506 			} else {
3507 				more_tso_frags = 0;
3508 				/* reset i and the tso payload size */
3509 				i = 1;
3510 				tso_seg_size = skb_shinfo(skb)->gso_size;
3511 			}
3512 
3513 			/* if the next fragment is contiguous */
3514 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3515 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3516 				skb_frag_len = skb_frag_len - tso_frag_len;
3517 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3518 
3519 			} else { /* the next fragment is not contiguous */
3520 				if (skb_shinfo(skb)->nr_frags == 0) {
3521 					qdf_info("TSO: nr_frags == 0!");
3522 					qdf_assert(0);
3523 					return 0;
3524 				}
3525 				if (j >= skb_shinfo(skb)->nr_frags) {
3526 					qdf_info("TSO: nr_frags %d j %d",
3527 						 skb_shinfo(skb)->nr_frags, j);
3528 					qdf_assert(0);
3529 					return 0;
3530 				}
3531 				frag = &skb_shinfo(skb)->frags[j];
3532 				skb_frag_len = skb_frag_size(frag);
3533 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3534 				tso_frag_vaddr = skb_frag_address_safe(frag);
3535 				j++;
3536 			}
3537 
3538 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3539 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3540 				tso_seg_size);
3541 
3542 			if (!(tso_frag_vaddr)) {
3543 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3544 						__func__);
3545 				return 0;
3546 			}
3547 
3548 			tso_frag_paddr =
3549 					 dma_map_single(osdev->dev,
3550 						 tso_frag_vaddr,
3551 						 tso_frag_len,
3552 						 DMA_TO_DEVICE);
3553 			if (unlikely(dma_mapping_error(osdev->dev,
3554 							tso_frag_paddr))) {
3555 				qdf_err("DMA mapping error!");
3556 				qdf_assert_always(0);
3557 				return 0;
3558 			}
3559 		}
3560 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3561 				curr_seg->seg.tso_flags.tcp_seq_num);
3562 		num_seg--;
3563 		/* if TCP FIN flag was set, set it in the last segment */
3564 		if (!num_seg)
3565 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3566 
3567 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3568 		curr_seg = curr_seg->next;
3569 	}
3570 	return tso_info->num_segs;
3571 }
3572 qdf_export_symbol(__qdf_nbuf_get_tso_info);
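/*
 * Illustrative call sequence (sketch, not driver code): the caller is
 * expected to pre-compute the segment count, pre-allocate the segment
 * element chain and the num_seg element, and only then request the
 * per-segment fill. The "..." placeholders stand for caller-owned
 * allocations.
 *
 *	struct qdf_tso_info_t tso_info = {0};
 *
 *	tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	tso_info.tso_seg_list = ...;     // chain of qdf_tso_seg_elem_t
 *	tso_info.tso_num_seg_list = ...; // one qdf_tso_num_seg_elem_t
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *		goto fail;               // 0 segments indicates failure
 */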
3573 
3574 /**
3575  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3576  *
3577  * @osdev: qdf device handle
3578  * @tso_seg: TSO segment element to be unmapped
3579  * @is_last_seg: whether this is last tso seg or not
3580  *
3581  * Return: none
3582  */
3583 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3584 			  struct qdf_tso_seg_elem_t *tso_seg,
3585 			  bool is_last_seg)
3586 {
3587 	uint32_t num_frags = 0;
3588 
3589 	if (tso_seg->seg.num_frags > 0)
3590 		num_frags = tso_seg->seg.num_frags - 1;
3591 
3592 	/*Num of frags in a tso seg cannot be less than 2 */
3593 	if (num_frags < 1) {
3594 		/*
3595 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3596 		 * this may happen when qdf_nbuf_get_tso_info failed,
3597 		 * do dma unmap for the 0th frag in this seg.
3598 		 */
3599 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3600 			goto last_seg_free_first_frag;
3601 
3602 		qdf_assert(0);
3603 		qdf_err("ERROR: num of frags in a tso segment is %d",
3604 			(num_frags + 1));
3605 		return;
3606 	}
3607 
3608 	while (num_frags) {
3609 		/*Do dma unmap the tso seg except the 0th frag */
3610 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3611 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3612 				num_frags);
3613 			qdf_assert(0);
3614 			return;
3615 		}
3616 		dma_unmap_single(osdev->dev,
3617 				 tso_seg->seg.tso_frags[num_frags].paddr,
3618 				 tso_seg->seg.tso_frags[num_frags].length,
3619 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3620 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3621 		num_frags--;
3622 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3623 	}
3624 
3625 last_seg_free_first_frag:
3626 	if (is_last_seg) {
3627 		/*Do dma unmap for the tso seg 0th frag */
3628 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3629 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3630 			qdf_assert(0);
3631 			return;
3632 		}
3633 		dma_unmap_single(osdev->dev,
3634 				 tso_seg->seg.tso_frags[0].paddr,
3635 				 tso_seg->seg.tso_frags[0].length,
3636 				 __qdf_dma_dir_to_os(QDF_DMA_TO_DEVICE));
3637 		tso_seg->seg.tso_frags[0].paddr = 0;
3638 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3639 	}
3640 }
3641 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
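/*
 * Illustrative usage (sketch, not driver code): on TX completion each
 * mapped segment element is unmapped in turn; passing is_last_seg = true
 * for the final element also releases the shared EIT header mapping.
 * Treating a NULL ->next as "last" is an assumption of this sketch; real
 * callers track the last segment explicitly.
 *
 *	while (seg) {
 *		__qdf_nbuf_unmap_tso_segment(osdev, seg, !seg->next);
 *		seg = seg->next;
 *	}
 */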
3642 
3643 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3644 {
3645 	size_t packet_len;
3646 
3647 	packet_len = skb->len -
3648 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3649 		 tcp_hdrlen(skb));
3650 
3651 	return packet_len;
3652 }
3653 
3654 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
3655 
3656 /**
3657  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
3658  * a jumbo nbuf will be split into
3659  * @skb: network buffer to be segmented
3660  *
3661  * This function calculates how many TSO segments a TCP jumbo packet
3662  * will be divided into, accounting for the linear data, the page
3663  * fragments and the per-segment fragment limit. It does not build the
3664  * segment list itself; that is done by __qdf_nbuf_get_tso_info().
3665  * The BUILD_X86 variants additionally validate fragment addresses.
3666  *
3667  * Return: number of TSO segments
3668  */
3669 #ifndef BUILD_X86
3670 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3671 {
3672 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3673 	uint32_t remainder, num_segs = 0;
3674 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3675 	uint8_t frags_per_tso = 0;
3676 	uint32_t skb_frag_len = 0;
3677 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3678 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3679 	skb_frag_t *frag = NULL;
3680 	int j = 0;
3681 	uint32_t temp_num_seg = 0;
3682 
3683 	/* length of the first chunk of data in the skb minus eit header*/
3684 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3685 
3686 	/* Calculate num of segs for skb's first chunk of data*/
3687 	remainder = skb_frag_len % tso_seg_size;
3688 	num_segs = skb_frag_len / tso_seg_size;
3689 	/*
3690 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3691 	 * In that case, one more tso seg is required to accommodate
3692 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3693 	 * then remaining data will be accommodated while doing the calculation
3694 	 * for nr_frags data. Hence, frags_per_tso++.
3695 	 */
3696 	if (remainder) {
3697 		if (!skb_nr_frags)
3698 			num_segs++;
3699 		else
3700 			frags_per_tso++;
3701 	}
3702 
3703 	while (skb_nr_frags) {
3704 		if (j >= skb_shinfo(skb)->nr_frags) {
3705 			qdf_info("TSO: nr_frags %d j %d",
3706 				 skb_shinfo(skb)->nr_frags, j);
3707 			qdf_assert(0);
3708 			return 0;
3709 		}
3710 		/*
3711 		 * Calculate the number of tso segs for nr_frags data:
3712 		 * Get the length of each frag in skb_frag_len, add it to the
3713 		 * remainder. Get the number of segments by dividing it by
3714 		 * tso_seg_size and calculate the new remainder.
3715 		 * Decrement the nr_frags value and keep
3716 		 * looping over all the skb fragments.
3717 		 */
3718 		frag = &skb_shinfo(skb)->frags[j];
3719 		skb_frag_len = skb_frag_size(frag);
3720 		temp_num_seg = num_segs;
3721 		remainder += skb_frag_len;
3722 		num_segs += remainder / tso_seg_size;
3723 		remainder = remainder % tso_seg_size;
3724 		skb_nr_frags--;
3725 		if (remainder) {
3726 			if (num_segs > temp_num_seg)
3727 				frags_per_tso = 0;
3728 			/*
3729 			 * Increment frags_per_tso whenever the remainder is
3730 			 * positive. If frags_per_tso reaches (max - 1)
3731 			 * [the first frag always carries the EIT header, hence max - 1],
3732 			 * increment num_segs as no more data can be
3733 			 * accommodated in the current tso seg. Reset the remainder
3734 			 * and frags_per_tso and keep looping.
3735 			 */
3736 			frags_per_tso++;
3737 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3738 				num_segs++;
3739 				frags_per_tso = 0;
3740 				remainder = 0;
3741 			}
3742 			/*
3743 			 * If this is the last skb frag and the remainder is still
3744 			 * non-zero (frags_per_tso has not reached max - 1),
3745 			 * then increment num_segs to take care of the
3746 			 * remaining length.
3747 			 */
3748 			if (!skb_nr_frags && remainder) {
3749 				num_segs++;
3750 				frags_per_tso = 0;
3751 			}
3752 		} else {
3753 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3754 			frags_per_tso = 0;
3755 		}
3756 		j++;
3757 	}
3758 
3759 	return num_segs;
3760 }
3761 #elif !defined(QCA_WIFI_QCN9000)
3762 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3763 {
3764 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3765 	skb_frag_t *frag = NULL;
3766 
3767 	/*
3768 	 * Check if the head SKB or any of the frags are allocated in the
3769 	 * < 0x50000000 region, which cannot be accessed by the Target
3770 	 */
3771 	if (virt_to_phys(skb->data) < 0x50000040) {
3772 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3773 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3774 				virt_to_phys(skb->data));
3775 		goto fail;
3776 
3777 	}
3778 
3779 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3780 		frag = &skb_shinfo(skb)->frags[i];
3781 
3782 		if (!frag)
3783 			goto fail;
3784 
3785 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3786 			goto fail;
3787 	}
3788 
3789 
3790 	gso_size = skb_shinfo(skb)->gso_size;
3791 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3792 			+ tcp_hdrlen(skb));
3793 	while (tmp_len) {
3794 		num_segs++;
3795 		if (tmp_len > gso_size)
3796 			tmp_len -= gso_size;
3797 		else
3798 			break;
3799 	}
3800 
3801 	return num_segs;
3802 
3803 	/*
3804 	 * Do not free this frame, just do socket level accounting
3805 	 * so that this is not reused.
3806 	 */
3807 fail:
3808 	if (skb->sk)
3809 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3810 
3811 	return 0;
3812 }
3813 #else
3814 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3815 {
3816 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3817 	skb_frag_t *frag = NULL;
3818 
3819 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3820 		frag = &skb_shinfo(skb)->frags[i];
3821 
3822 		if (!frag)
3823 			goto fail;
3824 	}
3825 
3826 	gso_size = skb_shinfo(skb)->gso_size;
3827 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3828 			+ tcp_hdrlen(skb));
3829 	while (tmp_len) {
3830 		num_segs++;
3831 		if (tmp_len > gso_size)
3832 			tmp_len -= gso_size;
3833 		else
3834 			break;
3835 	}
3836 
3837 	return num_segs;
3838 
3839 	/*
3840 	 * Do not free this frame, just do socket level accounting
3841 	 * so that this is not reused.
3842 	 */
3843 fail:
3844 	if (skb->sk)
3845 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3846 
3847 	return 0;
3848 }
3849 #endif
3850 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
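
/*
 * Worked example (illustrative figures only, not taken from the driver):
 * for an skb with gso_size = 1460, a 66 byte ethernet/IP/TCP header, 1000
 * payload bytes in the linear area and one 5000 byte page fragment, the
 * payload counted here is 1000 + 5000 = 6000 bytes, so the function
 * reports ceil(6000 / 1460) = 5 TSO segments (ignoring the per-segment
 * fragment cap enforced via FRAG_NUM_MAX above).
 */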
3851 
3852 #endif /* FEATURE_TSO */
3853 
3854 /**
3855  * __qdf_dmaaddr_to_32s() - return the high and low parts of a dma_addr
3856  * @dmaaddr: DMA address to be split
3857  * @lo: pointer filled with the lower 32 bits of the DMA address
3858  * @hi: pointer filled with the upper 32 bits of the DMA address
3859  * Return: N/A
3860  */
3861 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3862 			  uint32_t *lo, uint32_t *hi)
3863 {
3864 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3865 		*lo = lower_32_bits(dmaaddr);
3866 		*hi = upper_32_bits(dmaaddr);
3867 	} else {
3868 		*lo = dmaaddr;
3869 		*hi = 0;
3870 	}
3871 }
3872 
3873 qdf_export_symbol(__qdf_dmaaddr_to_32s);
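
/*
 * Usage sketch (illustrative only): splitting an nbuf's mapped address
 * into the two 32-bit words a hardware descriptor typically wants.
 * "desc" with addr_lo/addr_hi fields is a hypothetical descriptor here.
 *
 *	uint32_t lo, hi;
 *
 *	__qdf_dmaaddr_to_32s(QDF_NBUF_CB_PADDR(nbuf), &lo, &hi);
 *	desc->addr_lo = lo;
 *	desc->addr_hi = hi;
 */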
3874 
3875 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3876 {
3877 	qdf_nbuf_users_inc(&skb->users);
3878 	return skb;
3879 }
3880 qdf_export_symbol(__qdf_nbuf_inc_users);
3881 
3882 int __qdf_nbuf_get_users(struct sk_buff *skb)
3883 {
3884 	return qdf_nbuf_users_read(&skb->users);
3885 }
3886 qdf_export_symbol(__qdf_nbuf_get_users);
3887 
3888 /**
3889  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3890  * @skb: sk_buff handle
3891  *
3892  * Return: none
3893  */
3894 
3895 void __qdf_nbuf_ref(struct sk_buff *skb)
3896 {
3897 	skb_get(skb);
3898 }
3899 qdf_export_symbol(__qdf_nbuf_ref);
3900 
3901 /**
3902  * __qdf_nbuf_shared() - Check whether the buffer is shared
3903  *  @skb: sk_buff buffer
3904  *
3905  *  Return: true if more than one person has a reference to this buffer.
3906  */
3907 int __qdf_nbuf_shared(struct sk_buff *skb)
3908 {
3909 	return skb_shared(skb);
3910 }
3911 qdf_export_symbol(__qdf_nbuf_shared);
3912 
3913 /**
3914  * __qdf_nbuf_dmamap_create() - create a DMA map.
3915  * @osdev: qdf device handle
3916  * @dmap: dma map handle
3917  *
3918  * The map can later be used to map networking buffers. DMA maps:
3919  * - need space in the driver's software descriptor
3920  * - are typically created when the driver instance is created
3921  * - need to be created before any API (qdf_nbuf_map) that uses them
3922  *
3923  * Return: QDF_STATUS
3924  */
3925 QDF_STATUS
3926 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
3927 {
3928 	QDF_STATUS error = QDF_STATUS_SUCCESS;
3929 	/*
3930 	 * The driver can advertise its SG capability; it must be handled.
3931 	 * Use bounce buffers if they are present.
3932 	 */
3933 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
3934 	if (!(*dmap))
3935 		error = QDF_STATUS_E_NOMEM;
3936 
3937 	return error;
3938 }
3939 qdf_export_symbol(__qdf_nbuf_dmamap_create);
3940 /**
3941  * __qdf_nbuf_dmamap_destroy() - delete a dma map
3942  * @osdev: qdf device handle
3943  * @dmap: dma map handle
3944  *
3945  * Return: none
3946  */
3947 void
3948 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
3949 {
3950 	kfree(dmap);
3951 }
3952 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
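
/*
 * Usage sketch (illustrative only): the expected lifecycle is one
 * __qdf_nbuf_dmamap_create() at driver attach and a matching
 * __qdf_nbuf_dmamap_destroy() at detach. "softc" is a hypothetical
 * per-device context.
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &softc->dmap) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	__qdf_nbuf_dmamap_destroy(osdev, softc->dmap);
 */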
3953 
3954 /**
3955  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
3956  * @osdev: os device
3957  * @skb: skb handle
3958  * @dir: dma direction
3959  * @nbytes: number of bytes to be mapped
3960  *
3961  * Return: QDF_STATUS
3962  */
3963 #ifdef QDF_OS_DEBUG
3964 QDF_STATUS
3965 __qdf_nbuf_map_nbytes(
3966 	qdf_device_t osdev,
3967 	struct sk_buff *skb,
3968 	qdf_dma_dir_t dir,
3969 	int nbytes)
3970 {
3971 	struct skb_shared_info  *sh = skb_shinfo(skb);
3972 
3973 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
3974 
3975 	/*
3976 	 * Assume there's only a single fragment.
3977 	 * To support multiple fragments, it would be necessary to change
3978 	 * qdf_nbuf_t to be a separate object that stores meta-info
3979 	 * (including the bus address for each fragment) and a pointer
3980 	 * to the underlying sk_buff.
3981 	 */
3982 	qdf_assert(sh->nr_frags == 0);
3983 
3984 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3985 }
3986 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3987 #else
3988 QDF_STATUS
3989 __qdf_nbuf_map_nbytes(
3990 	qdf_device_t osdev,
3991 	struct sk_buff *skb,
3992 	qdf_dma_dir_t dir,
3993 	int nbytes)
3994 {
3995 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
3996 }
3997 qdf_export_symbol(__qdf_nbuf_map_nbytes);
3998 #endif
3999 /**
4000  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4001  * @osdev: OS device
4002  * @skb: skb handle
4003  * @dir: direction
4004  * @nbytes: number of bytes
4005  *
4006  * Return: none
4007  */
4008 void
4009 __qdf_nbuf_unmap_nbytes(
4010 	qdf_device_t osdev,
4011 	struct sk_buff *skb,
4012 	qdf_dma_dir_t dir,
4013 	int nbytes)
4014 {
4015 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4016 
4017 	/*
4018 	 * Assume there's a single fragment.
4019 	 * If not, the assertion in __qdf_nbuf_map_nbytes will catch it.
4020 	 */
4021 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4022 }
4023 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
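
/*
 * Usage sketch (illustrative only): mapping a linear TX nbuf before DMA
 * and unmapping it once the transmit completion arrives; the single
 * fragment assumption matches the assertions above.
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	... hand the buffer to the target ...
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */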
4024 
4025 /**
4026  * __qdf_nbuf_dma_map_info() - return the dma map info
4027  * @bmap: dma map
4028  * @sg: dma map info
4029  *
4030  * Return: none
4031  */
4032 void
4033 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4034 {
4035 	qdf_assert(bmap->mapped);
4036 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4037 
4038 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4039 			sizeof(struct __qdf_segment));
4040 	sg->nsegs = bmap->nsegs;
4041 }
4042 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4043 /**
4044  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4045  *			specified by the index
4046  * @skb: sk buff
4047  * @sg: scatter/gather list of all the frags
4048  *
4049  * Return: none
4050  */
4051 #if defined(__QDF_SUPPORT_FRAG_MEM)
4052 void
4053 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4054 {
4055 	struct skb_shared_info *sh = skb_shinfo(skb);
4056 	int i;
4057 
4058 	qdf_assert(skb);
4059 	sg->sg_segs[0].vaddr = skb->data;
4060 	sg->sg_segs[0].len   = skb->len;
4061 	sg->nsegs            = 1;
4062 
4063 	for (i = 1; i <= sh->nr_frags; i++) {
4064 		skb_frag_t *f = &sh->frags[i - 1];
4065 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
4066 					f->page_offset);
4067 		sg->sg_segs[i].len = f->size;
4068 		qdf_assert(i < QDF_MAX_SGLIST);
4069 	}
4070 	sg->nsegs += sh->nr_frags;
4071 }
4072 qdf_export_symbol(__qdf_nbuf_frag_info);
4073 #else
4074 #ifdef QDF_OS_DEBUG
4075 void
4076 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4077 {
4078 
4079 	struct skb_shared_info  *sh = skb_shinfo(skb);
4080 
4081 	qdf_assert(skb);
4082 	sg->sg_segs[0].vaddr = skb->data;
4083 	sg->sg_segs[0].len   = skb->len;
4084 	sg->nsegs            = 1;
4085 
4086 	qdf_assert(sh->nr_frags == 0);
4087 }
4088 qdf_export_symbol(__qdf_nbuf_frag_info);
4089 #else
4090 void
4091 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4092 {
4093 	sg->sg_segs[0].vaddr = skb->data;
4094 	sg->sg_segs[0].len   = skb->len;
4095 	sg->nsegs            = 1;
4096 }
4097 qdf_export_symbol(__qdf_nbuf_frag_info);
4098 #endif
4099 #endif
4100 /**
4101  * __qdf_nbuf_get_frag_size() - get frag size
4102  * @nbuf: sk buffer
4103  * @cur_frag: current frag
4104  *
4105  * Return: frag size
4106  */
4107 uint32_t
4108 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4109 {
4110 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4111 	const skb_frag_t *frag = sh->frags + cur_frag;
4112 
4113 	return skb_frag_size(frag);
4114 }
4115 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4116 
4117 /**
4118  * __qdf_nbuf_frag_map() - dma map frag
4119  * @osdev: os device
4120  * @nbuf: sk buff
4121  * @offset: offset
4122  * @dir: direction
4123  * @cur_frag: current fragment
4124  *
4125  * Return: QDF status
4126  */
4127 #ifdef A_SIMOS_DEVHOST
4128 QDF_STATUS __qdf_nbuf_frag_map(
4129 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4130 	int offset, qdf_dma_dir_t dir, int cur_frag)
4131 {
4132 	QDF_NBUF_CB_PADDR(nbuf) = (qdf_dma_addr_t)(uintptr_t)nbuf->data;
4135 	return QDF_STATUS_SUCCESS;
4136 }
4137 qdf_export_symbol(__qdf_nbuf_frag_map);
4138 #else
4139 QDF_STATUS __qdf_nbuf_frag_map(
4140 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4141 	int offset, qdf_dma_dir_t dir, int cur_frag)
4142 {
4143 	dma_addr_t paddr, frag_len;
4144 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4145 	const skb_frag_t *frag = sh->frags + cur_frag;
4146 
4147 	frag_len = skb_frag_size(frag);
4148 
4149 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4150 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4151 					__qdf_dma_dir_to_os(dir));
4152 	return dma_mapping_error(osdev->dev, paddr) ?
4153 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4154 }
4155 qdf_export_symbol(__qdf_nbuf_frag_map);
4156 #endif
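
/*
 * Usage sketch (illustrative only): DMA-mapping page fragment 0 of a TX
 * nbuf; in the non-simulation build the resulting bus address is stored
 * by the function itself in QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf).
 *
 *	if (__qdf_nbuf_frag_map(osdev, nbuf, 0, QDF_DMA_TO_DEVICE, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 */
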
4157 /**
4158  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4159  * @dmap: dma map
4160  * @cb: callback
4161  * @arg: argument
4162  *
4163  * Return: none
4164  */
4165 void
4166 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4167 {
4168 	return;
4169 }
4170 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4171 
4172 
4173 /**
4174  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4175  * @osdev: os device
4176  * @buf: sk buff
4177  * @dir: direction
4178  *
4179  * Return: none
4180  */
4181 #if defined(A_SIMOS_DEVHOST)
4182 static void __qdf_nbuf_sync_single_for_cpu(
4183 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4184 {
4185 	return;
4186 }
4187 #else
4188 static void __qdf_nbuf_sync_single_for_cpu(
4189 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4190 {
4191 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4192 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4193 		return;
4194 	}
4195 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4196 		skb_end_offset(buf) - skb_headroom(buf),
4197 		__qdf_dma_dir_to_os(dir));
4198 }
4199 #endif
4200 /**
4201  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4202  * @osdev: os device
4203  * @skb: sk buff
4204  * @dir: direction
4205  *
4206  * Return: none
4207  */
4208 void
4209 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4210 	struct sk_buff *skb, qdf_dma_dir_t dir)
4211 {
4212 	qdf_assert(
4213 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4214 
4215 	/*
4216 	 * Assume there's a single fragment.
4217 	 * If not, the assertion in __qdf_nbuf_map_nbytes will catch it.
4218 	 */
4219 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4220 }
4221 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
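
/*
 * Usage sketch (illustrative only): making a DMA-filled RX buffer visible
 * to the CPU before the host parses it.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	... parse skb->data ...
 */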
4222 
4223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4224 /**
4225  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4226  * @rx_status: Pointer to rx_status.
4227  * @rtap_buf: Buf to which VHT info has to be updated.
4228  * @rtap_len: Current length of radiotap buffer
4229  *
4230  * Return: Length of radiotap after VHT flags updated.
4231  */
4232 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4233 					struct mon_rx_status *rx_status,
4234 					int8_t *rtap_buf,
4235 					uint32_t rtap_len)
4236 {
4237 	uint16_t vht_flags = 0;
4238 
4239 	rtap_len = qdf_align(rtap_len, 2);
4240 
4241 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4242 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4243 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4244 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4245 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4246 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4247 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4248 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4249 	rtap_len += 2;
4250 
4251 	rtap_buf[rtap_len] |=
4252 		(rx_status->is_stbc ?
4253 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4254 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4255 		(rx_status->ldpc ?
4256 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4257 		(rx_status->beamformed ?
4258 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4259 	rtap_len += 1;
4260 	switch (rx_status->vht_flag_values2) {
4261 	case IEEE80211_RADIOTAP_VHT_BW_20:
4262 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4263 		break;
4264 	case IEEE80211_RADIOTAP_VHT_BW_40:
4265 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4266 		break;
4267 	case IEEE80211_RADIOTAP_VHT_BW_80:
4268 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4269 		break;
4270 	case IEEE80211_RADIOTAP_VHT_BW_160:
4271 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4272 		break;
4273 	}
4274 	rtap_len += 1;
4275 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4276 	rtap_len += 1;
4277 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4278 	rtap_len += 1;
4279 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4280 	rtap_len += 1;
4281 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4282 	rtap_len += 1;
4283 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4284 	rtap_len += 1;
4285 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4286 	rtap_len += 1;
4287 	put_unaligned_le16(rx_status->vht_flag_values6,
4288 			   &rtap_buf[rtap_len]);
4289 	rtap_len += 2;
4290 
4291 	return rtap_len;
4292 }
4293 
4294 /**
4295  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header HE flags
4296  * @rx_status: Pointer to rx_status.
4297  * @rtap_buf: buffer to which radiotap has to be updated
4298  * @rtap_len: radiotap length
4299  *
4300  * API update high-efficiency (11ax) fields in the radiotap header
4301  *
4302  * Return: length of rtap_len updated.
4303  */
4304 static unsigned int
4305 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4306 				     int8_t *rtap_buf, uint32_t rtap_len)
4307 {
4308 	/*
4309 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4310 	 * Enable all "known" HE radiotap flags for now
4311 	 */
4312 	rtap_len = qdf_align(rtap_len, 2);
4313 
4314 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4315 	rtap_len += 2;
4316 
4317 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4318 	rtap_len += 2;
4319 
4320 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4321 	rtap_len += 2;
4322 
4323 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4324 	rtap_len += 2;
4325 
4326 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4327 	rtap_len += 2;
4328 
4329 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4330 	rtap_len += 2;
4331 	qdf_rl_debug("he data %x %x %x %x %x %x",
4332 		     rx_status->he_data1,
4333 		     rx_status->he_data2, rx_status->he_data3,
4334 		     rx_status->he_data4, rx_status->he_data5,
4335 		     rx_status->he_data6);
4336 	return rtap_len;
4337 }
4338 
4339 
4340 /**
4341  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4342  * @rx_status: Pointer to rx_status.
4343  * @rtap_buf: buffer to which radiotap has to be updated
4344  * @rtap_len: radiotap length
4345  *
4346  * API update HE-MU fields in the radiotap header
4347  *
4348  * Return: length of rtap_len updated.
4349  */
4350 static unsigned int
4351 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4352 				     int8_t *rtap_buf, uint32_t rtap_len)
4353 {
4354 	rtap_len = qdf_align(rtap_len, 2);
4355 
4356 	/*
4357 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4358 	 * Enable all "known" he-mu radiotap flags for now
4359 	 */
4360 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4361 	rtap_len += 2;
4362 
4363 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4364 	rtap_len += 2;
4365 
4366 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4367 	rtap_len += 1;
4368 
4369 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4370 	rtap_len += 1;
4371 
4372 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4373 	rtap_len += 1;
4374 
4375 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4376 	rtap_len += 1;
4377 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4378 		  rx_status->he_flags1,
4379 		  rx_status->he_flags2, rx_status->he_RU[0],
4380 		  rx_status->he_RU[1], rx_status->he_RU[2],
4381 		  rx_status->he_RU[3]);
4382 
4383 	return rtap_len;
4384 }
4385 
4386 /**
4387  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4388  * @rx_status: Pointer to rx_status.
4389  * @rtap_buf: buffer to which radiotap has to be updated
4390  * @rtap_len: radiotap length
4391  *
4392  * API update he-mu-other fields in the radiotap header
4393  *
4394  * Return: length of rtap_len updated.
4395  */
4396 static unsigned int
4397 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4398 				     int8_t *rtap_buf, uint32_t rtap_len)
4399 {
4400 	rtap_len = qdf_align(rtap_len, 2);
4401 
4402 	/*
4403 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4404 	 * Enable all "known" he-mu-other radiotap flags for now
4405 	 */
4406 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4407 	rtap_len += 2;
4408 
4409 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4410 	rtap_len += 2;
4411 
4412 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4413 	rtap_len += 1;
4414 
4415 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4416 	rtap_len += 1;
4417 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4418 		  rx_status->he_per_user_1,
4419 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4420 		  rx_status->he_per_user_known);
4421 	return rtap_len;
4422 }
4423 
4424 #define IEEE80211_RADIOTAP_TX_STATUS 0
4425 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
4426 
4427 /*
4428  * This is the length budget for radiotap: the combined length
4429  * (mandatory struct ieee80211_radiotap_header + RADIOTAP_HEADER_LEN)
4430  * cannot be more than the available headroom_sz.
4431  * Increase this when more radiotap elements are added.
4432  * The number after '+' is the maximum possible increase due to alignment.
4433  */
4434 
4435 #define RADIOTAP_VHT_FLAGS_LEN (12 + 1)
4436 #define RADIOTAP_HE_FLAGS_LEN (12 + 1)
4437 #define RADIOTAP_HE_MU_FLAGS_LEN (8 + 1)
4438 #define RADIOTAP_HE_MU_OTHER_FLAGS_LEN (18 + 1)
4439 #define RADIOTAP_FIXED_HEADER_LEN 17
4440 #define RADIOTAP_HT_FLAGS_LEN 3
4441 #define RADIOTAP_AMPDU_STATUS_LEN (8 + 3)
4442 #define RADIOTAP_VENDOR_NS_LEN \
4443 	(sizeof(struct qdf_radiotap_vendor_ns_ath) + 1)
4444 /* This is Radio Tap Header Extension Length.
4445  * 4 Bytes for Extended it_present bit map +
4446  * 4 bytes padding for alignment
4447  */
4448 #define RADIOTAP_HEADER_EXT_LEN (2 * sizeof(uint32_t))
4449 #define RADIOTAP_HEADER_LEN (sizeof(struct ieee80211_radiotap_header) + \
4450 				RADIOTAP_FIXED_HEADER_LEN + \
4451 				RADIOTAP_HT_FLAGS_LEN + \
4452 				RADIOTAP_VHT_FLAGS_LEN + \
4453 				RADIOTAP_AMPDU_STATUS_LEN + \
4454 				RADIOTAP_HE_FLAGS_LEN + \
4455 				RADIOTAP_HE_MU_FLAGS_LEN + \
4456 				RADIOTAP_HE_MU_OTHER_FLAGS_LEN + \
4457 				RADIOTAP_VENDOR_NS_LEN + \
4458 				RADIOTAP_HEADER_EXT_LEN)
4459 
4460 #define IEEE80211_RADIOTAP_HE 23
4461 #define IEEE80211_RADIOTAP_HE_MU	24
4462 #define IEEE80211_RADIOTAP_HE_MU_OTHER	25
4463 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4464 
4465 /**
4466  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4467  * @rx_status: Pointer to rx_status.
4468  * @rtap_buf: Buf to which AMPDU info has to be updated.
4469  * @rtap_len: Current length of radiotap buffer
4470  *
4471  * Return: Length of radiotap after AMPDU flags updated.
4472  */
4473 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4474 					struct mon_rx_status *rx_status,
4475 					uint8_t *rtap_buf,
4476 					uint32_t rtap_len)
4477 {
4478 	/*
4479 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4480 	 * First 32 bits of AMPDU represents the reference number
4481 	 */
4482 
4483 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4484 	uint16_t ampdu_flags = 0;
4485 	uint16_t ampdu_reserved_flags = 0;
4486 
4487 	rtap_len = qdf_align(rtap_len, 4);
4488 
4489 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4490 	rtap_len += 4;
4491 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4492 	rtap_len += 2;
4493 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4494 	rtap_len += 2;
4495 
4496 	return rtap_len;
4497 }
4498 
4499 /**
4500  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4501  * @rx_status: Pointer to rx_status.
4502  * @nbuf:      nbuf pointer to which radiotap has to be updated
4503  * @headroom_sz: Available headroom size.
4504  *
4505  * Return: length of rtap_len updated.
4506  */
4507 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4508 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4509 {
4510 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4511 	struct ieee80211_radiotap_header *rthdr =
4512 		(struct ieee80211_radiotap_header *)rtap_buf;
4513 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4514 	uint32_t rtap_len = rtap_hdr_len;
4515 	uint8_t length = rtap_len;
4516 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4517 	uint32_t *rtap_ext = NULL;
4518 
4519 	/* Adding Extended Header space */
4520 	if (rx_status->add_rtap_ext) {
4521 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
4522 		rtap_len = rtap_hdr_len;
4523 	}
4524 	length = rtap_len;
4525 
4526 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4527 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4528 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4529 	rtap_len += 8;
4530 
4531 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4532 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4533 
4534 	if (rx_status->rs_fcs_err)
4535 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4536 
4537 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4538 	rtap_len += 1;
4539 
4540 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4541 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4542 	    !rx_status->he_flags) {
4543 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4544 		rtap_buf[rtap_len] = rx_status->rate;
4545 	} else
4546 		rtap_buf[rtap_len] = 0;
4547 	rtap_len += 1;
4548 
4549 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4550 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4551 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4552 	rtap_len += 2;
4553 	/* Channel flags. */
4554 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
4555 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4556 	else
4557 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4558 	if (rx_status->cck_flag)
4559 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4560 	if (rx_status->ofdm_flag)
4561 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4562 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4563 	rtap_len += 2;
4564 
4565 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4566 	 *					(dBm)
4567 	 */
4568 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4569 	/*
4570 	 * rssi_comb is in dB; it needs to be converted to dBm by
4571 	 * normalizing the value to the noise floor of -96 dBm
4572 	 */
4573 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4574 	rtap_len += 1;
4575 
4576 	/* RX signal noise floor */
4577 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4578 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4579 	rtap_len += 1;
4580 
4581 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4582 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4583 	rtap_buf[rtap_len] = rx_status->nr_ant;
4584 	rtap_len += 1;
4585 
4586 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4587 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4588 		return 0;
4589 	}
4590 
4591 	if (rx_status->ht_flags) {
4592 		length = rtap_len;
4593 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
4594 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4595 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4596 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4597 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4598 		rtap_len += 1;
4599 
4600 		if (rx_status->sgi)
4601 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4602 		if (rx_status->bw)
4603 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4604 		else
4605 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4606 		rtap_len += 1;
4607 
4608 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4609 		rtap_len += 1;
4610 
4611 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4612 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4613 			return 0;
4614 		}
4615 	}
4616 
4617 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4618 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4619 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4620 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4621 								rtap_buf,
4622 								rtap_len);
4623 	}
4624 
4625 	if (rx_status->vht_flags) {
4626 		length = rtap_len;
4627 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4628 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4629 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4630 								rtap_buf,
4631 								rtap_len);
4632 
4633 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4634 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4635 			return 0;
4636 		}
4637 	}
4638 
4639 	if (rx_status->he_flags) {
4640 		length = rtap_len;
4641 		/* IEEE80211_RADIOTAP_HE */
4642 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4643 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4644 								rtap_buf,
4645 								rtap_len);
4646 
4647 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4648 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4649 			return 0;
4650 		}
4651 	}
4652 
4653 	if (rx_status->he_mu_flags) {
4654 		length = rtap_len;
4655 		/* IEEE80211_RADIOTAP_HE-MU */
4656 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4657 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4658 								rtap_buf,
4659 								rtap_len);
4660 
4661 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4662 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4663 			return 0;
4664 		}
4665 	}
4666 
4667 	if (rx_status->he_mu_other_flags) {
4668 		length = rtap_len;
4669 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4670 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4671 		rtap_len =
4672 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4673 								rtap_buf,
4674 								rtap_len);
4675 
4676 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4677 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4678 			return 0;
4679 		}
4680 	}
4681 
4682 	rtap_len = qdf_align(rtap_len, 2);
4683 	/*
4684 	 * Radiotap Vendor Namespace
4685 	 */
4686 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4687 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4688 					(rtap_buf + rtap_len);
4689 	/*
4690 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4691 	 */
4692 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4693 	/*
4694 	 * Name space selector = 0
4695 	 * We only will have one namespace for now
4696 	 */
4697 	radiotap_vendor_ns_ath->hdr.selector = 0;
4698 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4699 					sizeof(*radiotap_vendor_ns_ath) -
4700 					sizeof(radiotap_vendor_ns_ath->hdr));
4701 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4702 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4703 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4704 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4705 				cpu_to_le32(rx_status->ppdu_timestamp);
4706 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4707 
4708 	/* Add Extension to Radiotap Header & corresponding data */
4709 	if (rx_status->add_rtap_ext) {
4710 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXT);
4711 		rtap_ext = (uint32_t *)&rthdr->it_present;
4712 		rtap_ext++;
4713 		*rtap_ext = cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_STATUS);
4714 		*rtap_ext |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RETRY_COUNT);
4715 
4716 		rtap_buf[rtap_len] = rx_status->tx_status;
4717 		rtap_len += 1;
4718 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
4719 		rtap_len += 1;
4720 	}
4721 
4722 	rthdr->it_len = cpu_to_le16(rtap_len);
4723 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4724 
4725 	if (headroom_sz < rtap_len) {
4726 		qdf_debug("DEBUG: Not enough space to update radiotap");
4727 		return 0;
4728 	}
4729 	qdf_nbuf_push_head(nbuf, rtap_len);
4730 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4731 	return rtap_len;
4732 }
4733 #else
4734 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4735 					struct mon_rx_status *rx_status,
4736 					int8_t *rtap_buf,
4737 					uint32_t rtap_len)
4738 {
4739 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4740 	return 0;
4741 }
4742 
4743 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4744 				      int8_t *rtap_buf, uint32_t rtap_len)
4745 {
4746 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4747 	return 0;
4748 }
4749 
4750 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4751 					struct mon_rx_status *rx_status,
4752 					uint8_t *rtap_buf,
4753 					uint32_t rtap_len)
4754 {
4755 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4756 	return 0;
4757 }
4758 
4759 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4760 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4761 {
4762 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4763 	return 0;
4764 }
4765 #endif
4766 qdf_export_symbol(qdf_nbuf_update_radiotap);
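
/*
 * Usage sketch (illustrative only): prepending a radiotap header to a
 * monitor mode frame before it is handed to the OS. "rx_status" is
 * assumed to have been filled by the caller from the PPDU status TLVs.
 *
 *	if (!qdf_nbuf_update_radiotap(rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf))) {
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 *	... deliver nbuf on the monitor interface ...
 */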
4767 
4768 /**
4769  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4770  * @cb_func_ptr: function pointer to the nbuf free callback
4771  *
4772  * This function registers a callback function for nbuf free.
4773  *
4774  * Return: none
4775  */
4776 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4777 {
4778 	nbuf_free_cb = cb_func_ptr;
4779 }
4780 
4781 /**
4782  * qdf_nbuf_classify_pkt() - classify packet
4783  * @skb - sk buff
4784  *
4785  * Return: none
4786  */
4787 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4788 {
4789 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4790 
4791 	/* check destination mac address is broadcast/multicast */
4792 	if (is_broadcast_ether_addr((uint8_t *)eh))
4793 		QDF_NBUF_CB_SET_BCAST(skb);
4794 	else if (is_multicast_ether_addr((uint8_t *)eh))
4795 		QDF_NBUF_CB_SET_MCAST(skb);
4796 
4797 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4798 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4799 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4800 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4801 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4802 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4803 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4804 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4805 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4806 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4807 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4808 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4809 }
4810 qdf_export_symbol(qdf_nbuf_classify_pkt);
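
/*
 * Usage sketch (illustrative only): classifying a frame at the TX entry
 * point and then branching on the cached packet type.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		... send on the high priority queue ...
 */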
4811 
4812 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4813 {
4814 	qdf_nbuf_users_set(&nbuf->users, 1);
4815 	nbuf->data = nbuf->head + NET_SKB_PAD;
4816 	skb_reset_tail_pointer(nbuf);
4817 }
4818 qdf_export_symbol(__qdf_nbuf_init);
4819 
4820 #ifdef WLAN_FEATURE_FASTPATH
4821 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4822 {
4823 	qdf_nbuf_users_set(&nbuf->users, 1);
4824 	nbuf->data = nbuf->head + NET_SKB_PAD;
4825 	skb_reset_tail_pointer(nbuf);
4826 }
4827 qdf_export_symbol(qdf_nbuf_init_fast);
4828 #endif /* WLAN_FEATURE_FASTPATH */
4829 
4830 
4831 #ifdef QDF_NBUF_GLOBAL_COUNT
4832 /**
4833  * __qdf_nbuf_mod_init() - initialization routine for qdf_nbuf
4834  *
4835  * Return: void
4836  */
4837 void __qdf_nbuf_mod_init(void)
4838 {
4839 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
4840 	qdf_atomic_init(&nbuf_count);
4841 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4842 }
4843 
4844 /**
4845  * __qdf_nbuf_mod_exit() - uninitialization routine for qdf_nbuf
4846  *
4847  * Return: void
4848  */
4849 void __qdf_nbuf_mod_exit(void)
4850 {
4851 }
4852 #endif
4853 
4854 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
4855 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4856 					    int offset)
4857 {
4858 	unsigned int frag_offset;
4859 	skb_frag_t *frag;
4860 
4861 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4862 		return QDF_STATUS_E_FAILURE;
4863 
4864 	frag = &skb_shinfo(nbuf)->frags[idx];
4865 	frag_offset = skb_frag_off(frag);
4866 
4867 	frag_offset += offset;
4868 	skb_frag_off_set(frag, frag_offset);
4869 
4870 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4871 
4872 	return QDF_STATUS_SUCCESS;
4873 }
4874 
4875 #else
4876 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4877 					    int offset)
4878 {
4879 	uint16_t frag_offset;
4880 	skb_frag_t *frag;
4881 
4882 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4883 		return QDF_STATUS_E_FAILURE;
4884 
4885 	frag = &skb_shinfo(nbuf)->frags[idx];
4886 	frag_offset = frag->page_offset;
4887 
4888 	frag_offset += offset;
4889 	frag->page_offset = frag_offset;
4890 
4891 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4892 
4893 	return QDF_STATUS_SUCCESS;
4894 }
4895 #endif
4896 
4897 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
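
/*
 * Usage sketch (illustrative only): consuming 8 bytes of header from
 * fragment 0 by advancing its page offset; the fragment size is reduced
 * by the same amount inside the call.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, 8) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */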
4898 
4899 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
4900 			    int offset, int frag_len,
4901 			    unsigned int truesize, bool take_frag_ref)
4902 {
4903 	struct page *page;
4904 	int frag_offset;
4905 	uint8_t nr_frag;
4906 
4907 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
4908 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
4909 
4910 	page = virt_to_head_page(buf);
4911 	frag_offset = buf - page_address(page);
4912 
4913 	skb_add_rx_frag(nbuf, nr_frag, page,
4914 			(frag_offset + offset),
4915 			frag_len, truesize);
4916 
4917 	if (unlikely(take_frag_ref)) {
4918 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
4919 		skb_frag_ref(nbuf, nr_frag);
4920 	}
4921 }
4922 
4923 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
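
/*
 * Usage sketch (illustrative only): attaching a received payload that
 * lives in an RX pool buffer to an nbuf without taking an extra page
 * reference; "rx_buf", "hdr_pad", "msdu_len" and "buf_size" are
 * placeholders for whatever the caller uses.
 *
 *	__qdf_nbuf_add_rx_frag(rx_buf, nbuf, hdr_pad, msdu_len,
 *			       buf_size, false);
 */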
4924 
4925 #ifdef NBUF_FRAG_MEMORY_DEBUG
4926 
4927 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
4928 						int offset, const char *func,
4929 						uint32_t line)
4930 {
4931 	QDF_STATUS result;
4932 	qdf_frag_t p_fragp, n_fragp;
4933 
4934 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
4935 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
4936 
4937 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
4938 
4939 	/*
4940 	 * Update frag address in frag debug tracker
4941 	 * when frag offset is successfully changed in skb
4942 	 */
4943 	if (result == QDF_STATUS_SUCCESS)
4944 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
4945 
4946 	return result;
4947 }
4948 
4949 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
4950 
4951 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
4952 				int offset, int frag_len,
4953 				unsigned int truesize, bool take_frag_ref,
4954 				const char *func, uint32_t line)
4955 {
4956 	qdf_frag_t fragp;
4957 	uint32_t num_nr_frags;
4958 
4959 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
4960 			       frag_len, truesize, take_frag_ref);
4961 
4962 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
4963 
4964 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
4965 
4966 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
4967 
4968 	/* Update frag address in frag debug tracking table */
4969 	if (fragp != buf)
4970 		qdf_frag_debug_update_addr(buf, fragp, func, line);
4971 
4972 	/* Update frag refcount in frag debug tracking table */
4973 	qdf_frag_debug_refcount_inc(fragp, func, line);
4974 }
4975 
4976 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
4977 
4978 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
4979 				    uint32_t line)
4980 {
4981 	uint32_t num_nr_frags;
4982 	uint32_t idx = 0;
4983 	qdf_nbuf_t ext_list;
4984 	qdf_frag_t p_frag;
4985 
4986 	if (qdf_unlikely(!buf))
4987 		return;
4988 
4989 	/* Take care to update the refcount in the debug entries for frags */
4990 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
4991 
4992 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
4993 
4994 	while (idx < num_nr_frags) {
4995 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
4996 		if (qdf_likely(p_frag))
4997 			qdf_frag_debug_refcount_inc(p_frag, func, line);
4998 		idx++;
4999 	}
5000 
5001 	/*
5002 	 * Take care to update the refcount in the debug entries for the
5003 	 * frags attached to the frag_list
5004 	 */
5005 	ext_list = qdf_nbuf_get_ext_list(buf);
5006 	while (ext_list) {
5007 		idx = 0;
5008 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5009 
5010 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5011 
5012 		while (idx < num_nr_frags) {
5013 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5014 			if (qdf_likely(p_frag))
5015 				qdf_frag_debug_refcount_inc(p_frag, func, line);
5016 			idx++;
5017 		}
5018 		ext_list = qdf_nbuf_queue_next(ext_list);
5019 	}
5020 }
5021 
5022 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
5023 
5024 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
5025 				    uint32_t line)
5026 {
5027 	uint32_t num_nr_frags;
5028 	qdf_nbuf_t ext_list;
5029 	uint32_t idx = 0;
5030 	qdf_frag_t p_frag;
5031 
5032 	if (qdf_unlikely(!buf))
5033 		return;
5034 
5035 	/*
5036 	 * Decrement the refcount for the frag debug nodes only when the
5037 	 * last user of the nbuf calls this API, so as to avoid decrementing
5038 	 * the refcount on every call except the last one when the nbuf has
5039 	 * multiple users.
5040 	 */
5041 	if (qdf_nbuf_get_users(buf) > 1)
5042 		return;
5043 
5044 	/* Take care to update the refcount in the debug entries for frags */
5045 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5046 
5047 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5048 
5049 	while (idx < num_nr_frags) {
5050 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5051 		if (qdf_likely(p_frag))
5052 			qdf_frag_debug_refcount_dec(p_frag, func, line);
5053 		idx++;
5054 	}
5055 
5056 	/* Take care to update debug entries for frags attached to frag_list */
5057 	ext_list = qdf_nbuf_get_ext_list(buf);
5058 	while (ext_list) {
5059 		if (qdf_nbuf_get_users(ext_list) == 1) {
5060 			idx = 0;
5061 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5062 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5063 			while (idx < num_nr_frags) {
5064 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5065 				if (qdf_likely(p_frag))
5066 					qdf_frag_debug_refcount_dec(p_frag,
5067 								    func, line);
5068 				idx++;
5069 			}
5070 		}
5071 		ext_list = qdf_nbuf_queue_next(ext_list);
5072 	}
5073 }
5074 
5075 qdf_export_symbol(qdf_net_buf_debug_release_frag);
5076 #endif /* NBUF_FRAG_MEMORY_DEBUG */
5077 
5078 #ifdef MEMORY_DEBUG
5079 void qdf_nbuf_acquire_track_lock(uint32_t index,
5080 				 unsigned long irq_flag)
5081 {
5082 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
5083 			  irq_flag);
5084 }
5085 
5086 void qdf_nbuf_release_track_lock(uint32_t index,
5087 				 unsigned long irq_flag)
5088 {
5089 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
5090 			       irq_flag);
5091 }
5092 
5093 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
5094 {
5095 	return gp_qdf_net_buf_track_tbl[index];
5096 }
5097 #endif /* MEMORY_DEBUG */
5098