xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework(QDF) network buffer management APIs
23  */
24 #ifdef IPA_OFFLOAD
25 #include <i_qdf_ipa_wdi3.h>
26 #endif
27 #include <linux/hashtable.h>
28 #include <linux/kernel.h>
29 #include <linux/version.h>
30 #include <linux/skbuff.h>
31 #include <linux/module.h>
32 #include <linux/proc_fs.h>
33 #include <linux/inetdevice.h>
34 #include <qdf_atomic.h>
35 #include <qdf_debugfs.h>
36 #include <qdf_lock.h>
37 #include <qdf_mem.h>
38 #include <qdf_module.h>
39 #include <qdf_nbuf.h>
40 #include <qdf_status.h>
41 #include "qdf_str.h"
42 #include <qdf_trace.h>
43 #include "qdf_tracker.h"
44 #include <qdf_types.h>
45 #include <net/ieee80211_radiotap.h>
46 #include <pld_common.h>
47 
48 #if defined(FEATURE_TSO)
49 #include <net/ipv6.h>
50 #include <linux/ipv6.h>
51 #include <linux/tcp.h>
52 #include <linux/if_vlan.h>
53 #include <linux/ip.h>
54 #endif /* FEATURE_TSO */
55 
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
57 
58 #define qdf_nbuf_users_inc atomic_inc
59 #define qdf_nbuf_users_dec atomic_dec
60 #define qdf_nbuf_users_set atomic_set
61 #define qdf_nbuf_users_read atomic_read
62 #else
63 #define qdf_nbuf_users_inc refcount_inc
64 #define qdf_nbuf_users_dec refcount_dec
65 #define qdf_nbuf_users_set refcount_set
66 #define qdf_nbuf_users_read refcount_read
67 #endif /* KERNEL_VERSION(4, 13, 0) */
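
/*
 * Illustrative note (not part of the original file): the wrappers above let
 * the rest of this file manipulate skb->users without caring whether the
 * running kernel stores it as atomic_t (< 4.13) or refcount_t (>= 4.13):
 *
 *	qdf_nbuf_users_inc(&skb->users);
 *	if (qdf_nbuf_users_read(&skb->users) == 1)
 *		... last reference ...
 */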
68 
69 #define IEEE80211_RADIOTAP_VHT_BW_20	0
70 #define IEEE80211_RADIOTAP_VHT_BW_40	1
71 #define IEEE80211_RADIOTAP_VHT_BW_80	2
72 #define IEEE80211_RADIOTAP_VHT_BW_160	3
73 
74 #define RADIOTAP_VHT_BW_20	0
75 #define RADIOTAP_VHT_BW_40	1
76 #define RADIOTAP_VHT_BW_80	4
77 #define RADIOTAP_VHT_BW_160	11
78 
79 /* tx status */
80 #define RADIOTAP_TX_STATUS_FAIL		1
81 #define RADIOTAP_TX_STATUS_NOACK	2
82 
83 /* channel number to freq conversion */
84 #define CHANNEL_NUM_14 14
85 #define CHANNEL_NUM_15 15
86 #define CHANNEL_NUM_27 27
87 #define CHANNEL_NUM_35 35
88 #define CHANNEL_NUM_182 182
89 #define CHANNEL_NUM_197 197
90 #define CHANNEL_FREQ_2484 2484
91 #define CHANNEL_FREQ_2407 2407
92 #define CHANNEL_FREQ_2512 2512
93 #define CHANNEL_FREQ_5000 5000
94 #define CHANNEL_FREQ_4000 4000
95 #define CHANNEL_FREQ_5150 5150
96 #define FREQ_MULTIPLIER_CONST_5MHZ 5
97 #define FREQ_MULTIPLIER_CONST_20MHZ 20
98 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
99 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
100 #define RADIOTAP_CCK_CHANNEL 0x0020
101 #define RADIOTAP_OFDM_CHANNEL 0x0040
102 
103 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
104 #include <qdf_mc_timer.h>
105 
106 struct qdf_track_timer {
107 	qdf_mc_timer_t track_timer;
108 	qdf_atomic_t alloc_fail_cnt;
109 };
110 
111 static struct qdf_track_timer alloc_track_timer;
112 
113 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
114 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
115 #endif
116 
117 #ifdef NBUF_MEMORY_DEBUG
118 /* SMMU crash indication */
119 static qdf_atomic_t smmu_crashed;
120 /* Number of nbufs not added to history */
121 unsigned long g_histroy_add_drop;
122 #endif
123 
124 /* Packet Counter */
125 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
126 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
127 #ifdef QDF_NBUF_GLOBAL_COUNT
128 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
129 static qdf_atomic_t nbuf_count;
130 #endif
131 
132 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
133 static bool is_initial_mem_debug_disabled;
134 #endif
135 
136 /**
137  * __qdf_nbuf_get_ip_offset() - Get IPV4/V6 header offset
138  * @data: Pointer to network data buffer
139  *
140  * Get the IP header offset, accounting for any 802.1Q or 802.1AD
141  * tag present in the L2 header.
142  *
143  * Return: IP header offset
144  */
145 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
146 {
147 	uint16_t ether_type;
148 
149 	ether_type = *(uint16_t *)(data +
150 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
151 
152 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
153 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
154 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
155 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
156 
157 	return QDF_NBUF_TRAC_IP_OFFSET;
158 }
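
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that needs the IPv4 header regardless of any VLAN tag(s) might do:
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *	struct iphdr *iph;
 *
 *	iph = (struct iphdr *)(data + __qdf_nbuf_get_ip_offset(data));
 */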
159 
160 /**
161  * __qdf_nbuf_get_ether_type() - Get the ether type
162  * @data: Pointer to network data buffer
163  *
164  * Get the ether type, accounting for any 802.1Q or 802.1AD tag in the
165  * L2 header. The value is returned in network byte order; e.g. for
166  * IPv4 data (ether type 0x0800) this returns 0x0008 on a little-endian host.
167  *
168  * Return: ether type
169  */
170 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
171 {
172 	uint16_t ether_type;
173 
174 	ether_type = *(uint16_t *)(data +
175 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
176 
177 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
178 		ether_type = *(uint16_t *)(data +
179 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
180 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
181 		ether_type = *(uint16_t *)(data +
182 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
183 
184 	return ether_type;
185 }
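
/*
 * Illustrative note (not in the original file): the value returned by
 * __qdf_nbuf_get_ether_type() is still in network byte order, so callers
 * compare against a swapped constant instead of swapping the return value:
 *
 *	if (__qdf_nbuf_get_ether_type(data) ==
 *	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
 *		... IPv4 frame, possibly VLAN tagged ...
 */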
186 
187 /**
188  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
189  *
190  * Return: none
191  */
192 void qdf_nbuf_tx_desc_count_display(void)
193 {
194 	qdf_debug("Current Snapshot of the Driver:");
195 	qdf_debug("Data Packets:");
196 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
198 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
205 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
207 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
211 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
212 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
213 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
214 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
215 	qdf_debug("Mgmt Packets:");
216 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
227 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
228 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
229 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
230 }
231 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
232 
233 /**
234  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
235  * @packet_type: packet type, either mgmt or data
236  * @current_state: layer at which the packet is currently present
237  *
238  * Return: none
239  */
240 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
241 			uint8_t current_state)
242 {
243 	switch (packet_type) {
244 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
245 		nbuf_tx_mgmt[current_state]++;
246 		break;
247 	case QDF_NBUF_TX_PKT_DATA_TRACK:
248 		nbuf_tx_data[current_state]++;
249 		break;
250 	default:
251 		break;
252 	}
253 }
254 
255 /**
256  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data and mgmt
257  *
258  * Return: none
259  */
260 void qdf_nbuf_tx_desc_count_clear(void)
261 {
262 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
263 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
264 }
265 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
266 
267 /**
268  * qdf_nbuf_set_state() - Updates the packet state
269  * @nbuf: network buffer
270  * @current_state: layer at which the packet currently is
271  *
272  * This function updates the packet state to the layer at which the packet
273  * currently is
274  *
275  * Return: none
276  */
277 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
278 {
279 	/*
280 	 * Only Mgmt, Data Packets are tracked. WMI messages
281 	 * such as scan commands are not tracked
282 	 */
283 	uint8_t packet_type;
284 
285 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
286 
287 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
288 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
289 		return;
290 	}
291 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
292 	qdf_nbuf_tx_desc_count_update(packet_type,
293 					current_state);
294 }
295 qdf_export_symbol(qdf_nbuf_set_state);
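
/*
 * Usage sketch (illustrative, not part of this file): each layer in the TX
 * path marks the buffer as it takes ownership, which is what feeds the
 * counters printed by qdf_nbuf_tx_desc_count_display(), e.g.:
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
 *	... hand the buffer down to TXRX ...
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX);
 */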
296 
297 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
298 /**
299  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
300  *
301  * This function starts the alloc fail replenish timer.
302  *
303  * Return: void
304  */
305 static inline void __qdf_nbuf_start_replenish_timer(void)
306 {
307 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
308 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
309 	    QDF_TIMER_STATE_RUNNING)
310 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
311 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
312 }
313 
314 /**
315  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
316  *
317  * This function stops the alloc fail replenish timer.
318  *
319  * Return: void
320  */
321 static inline void __qdf_nbuf_stop_replenish_timer(void)
322 {
323 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
324 		return;
325 
326 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
327 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
328 	    QDF_TIMER_STATE_RUNNING)
329 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
330 }
331 
332 /**
333  * qdf_replenish_expire_handler - Replenish expire handler
334  *
335  * This function triggers when the alloc fail replenish timer expires.
336  *
337  * Return: void
338  */
339 static void qdf_replenish_expire_handler(void *arg)
340 {
341 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
342 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
343 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
344 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
345 
346 		/* Error handling here */
347 	}
348 }
349 
350 /**
351  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
352  *
353  * This function initializes the nbuf alloc fail replenish timer.
354  *
355  * Return: void
356  */
357 void __qdf_nbuf_init_replenish_timer(void)
358 {
359 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
360 			  qdf_replenish_expire_handler, NULL);
361 }
362 
363 /**
364  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
365  *
366  * This function deinitializes the nbuf alloc fail replenish timer.
367  *
368  * Return: void
369  */
370 void __qdf_nbuf_deinit_replenish_timer(void)
371 {
372 	__qdf_nbuf_stop_replenish_timer();
373 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
374 }
375 
376 void qdf_nbuf_stop_replenish_timer(void)
377 {
378 	__qdf_nbuf_stop_replenish_timer();
379 }
380 #else
381 
382 static inline void __qdf_nbuf_start_replenish_timer(void) {}
383 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
384 void qdf_nbuf_stop_replenish_timer(void)
385 {
386 }
387 #endif
388 
389 /* globals do not need to be initialized to NULL/0 */
390 qdf_nbuf_trace_update_t qdf_trace_update_cb;
391 qdf_nbuf_free_t nbuf_free_cb;
392 
393 #ifdef QDF_NBUF_GLOBAL_COUNT
394 
395 /**
396  * __qdf_nbuf_count_get() - get nbuf global count
397  *
398  * Return: nbuf global count
399  */
400 int __qdf_nbuf_count_get(void)
401 {
402 	return qdf_atomic_read(&nbuf_count);
403 }
404 qdf_export_symbol(__qdf_nbuf_count_get);
405 
406 /**
407  * __qdf_nbuf_count_inc() - increment nbuf global count
408  *
409  * @nbuf: sk buff
410  *
411  * Return: void
412  */
413 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
414 {
415 	int num_nbuf = 1;
416 	qdf_nbuf_t ext_list;
417 
418 	if (qdf_likely(is_initial_mem_debug_disabled))
419 		return;
420 
421 	ext_list = qdf_nbuf_get_ext_list(nbuf);
422 
423 	/* Take care to account for frag_list */
424 	while (ext_list) {
425 		++num_nbuf;
426 		ext_list = qdf_nbuf_queue_next(ext_list);
427 	}
428 
429 	qdf_atomic_add(num_nbuf, &nbuf_count);
430 }
431 qdf_export_symbol(__qdf_nbuf_count_inc);
432 
433 /**
434  * __qdf_nbuf_count_dec() - decrement nbuf global count
435  *
436  * @nbuf: sk buff
437  *
438  * Return: void
439  */
440 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
441 {
442 	qdf_nbuf_t ext_list;
443 	int num_nbuf;
444 
445 	if (qdf_likely(is_initial_mem_debug_disabled))
446 		return;
447 
448 	if (qdf_nbuf_get_users(nbuf) > 1)
449 		return;
450 
451 	num_nbuf = 1;
452 
453 	/* Take care to account for frag_list */
454 	ext_list = qdf_nbuf_get_ext_list(nbuf);
455 	while (ext_list) {
456 		if (qdf_nbuf_get_users(ext_list) == 1)
457 			++num_nbuf;
458 		ext_list = qdf_nbuf_queue_next(ext_list);
459 	}
460 
461 	qdf_atomic_sub(num_nbuf, &nbuf_count);
462 }
463 qdf_export_symbol(__qdf_nbuf_count_dec);
464 #endif
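
/*
 * Illustrative note (not in the original file): the global count is only
 * meaningful while increments and decrements stay paired over the nbuf
 * lifetime; __qdf_nbuf_alloc() bumps it via qdf_nbuf_count_inc() and
 * __qdf_nbuf_free() drops it via qdf_nbuf_count_dec(). The running total
 * presumably backs the NBUF_DEBUGFS_NAME ("nbuf_counters") debugfs entry
 * defined above when QDF_NBUF_GLOBAL_COUNT is enabled.
 */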
465 
466 #ifdef NBUF_FRAG_MEMORY_DEBUG
467 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
468 {
469 	qdf_nbuf_t ext_list;
470 	uint32_t num_nr_frags;
471 	uint32_t total_num_nr_frags;
472 
473 	if (qdf_likely(is_initial_mem_debug_disabled))
474 		return;
475 
476 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
477 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
478 
479 	total_num_nr_frags = num_nr_frags;
480 
481 	/* Take into account the frags attached to frag_list */
482 	ext_list = qdf_nbuf_get_ext_list(nbuf);
483 	while (ext_list) {
484 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
485 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
486 		total_num_nr_frags += num_nr_frags;
487 		ext_list = qdf_nbuf_queue_next(ext_list);
488 	}
489 
490 	qdf_frag_count_inc(total_num_nr_frags);
491 }
492 
493 qdf_export_symbol(qdf_nbuf_frag_count_inc);
494 
495 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
496 {
497 	qdf_nbuf_t ext_list;
498 	uint32_t num_nr_frags;
499 	uint32_t total_num_nr_frags;
500 
501 	if (qdf_likely(is_initial_mem_debug_disabled))
502 		return;
503 
504 	if (qdf_nbuf_get_users(nbuf) > 1)
505 		return;
506 
507 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
508 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
509 
510 	total_num_nr_frags = num_nr_frags;
511 
512 	/* Take into account the frags attached to frag_list */
513 	ext_list = qdf_nbuf_get_ext_list(nbuf);
514 	while (ext_list) {
515 		if (qdf_nbuf_get_users(ext_list) == 1) {
516 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
517 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
518 			total_num_nr_frags += num_nr_frags;
519 		}
520 		ext_list = qdf_nbuf_queue_next(ext_list);
521 	}
522 
523 	qdf_frag_count_dec(total_num_nr_frags);
524 }
525 
526 qdf_export_symbol(qdf_nbuf_frag_count_dec);
527 
528 #endif
529 
530 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
531 	!defined(QCA_WIFI_QCN9000)
532 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
533 				 int align, int prio, const char *func,
534 				 uint32_t line)
535 {
536 	struct sk_buff *skb;
537 	unsigned long offset;
538 	uint32_t lowmem_alloc_tries = 0;
539 
540 	if (align)
541 		size += (align - 1);
542 
543 realloc:
544 	skb = dev_alloc_skb(size);
545 
546 	if (skb)
547 		goto skb_alloc;
548 
549 	skb = pld_nbuf_pre_alloc(size);
550 
551 	if (!skb) {
552 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
553 				size, func, line);
554 		return NULL;
555 	}
556 
557 skb_alloc:
558 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
559 	 * Though we are trying to reserve low memory upfront to prevent this,
560 	 * we sometimes see SKBs allocated from low memory.
561 	 */
562 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
563 		lowmem_alloc_tries++;
564 		if (lowmem_alloc_tries > 100) {
565 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
566 				     size, func, line);
567 			return NULL;
568 		} else {
569 			/* Not freeing to make sure it
570 			 * will not get allocated again
571 			 */
572 			goto realloc;
573 		}
574 	}
575 	memset(skb->cb, 0x0, sizeof(skb->cb));
576 
577 	/*
578 	 * The default is for netbuf fragments to be interpreted
579 	 * as wordstreams rather than bytestreams.
580 	 */
581 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
582 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
583 
584 	/*
585 	 * XXX:how about we reserve first then align
586 	 * Align & make sure that the tail & data are adjusted properly
587 	 */
588 
589 	if (align) {
590 		offset = ((unsigned long)skb->data) % align;
591 		if (offset)
592 			skb_reserve(skb, align - offset);
593 	}
594 
595 	/*
596 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
597 	 * pointer
598 	 */
599 	skb_reserve(skb, reserve);
600 	qdf_nbuf_count_inc(skb);
601 
602 	return skb;
603 }
604 #else
605 
606 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
607 				 int align, int prio, const char *func,
608 				 uint32_t line)
609 {
610 	struct sk_buff *skb;
611 	unsigned long offset;
612 	int flags = GFP_KERNEL;
613 
614 	if (align)
615 		size += (align - 1);
616 
617 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
618 		flags = GFP_ATOMIC;
619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
620 		/*
621 		 * Observed that kcompactd burns out CPU trying to make an
622 		 * order-3 page. __netdev_alloc_skb() has a 4k page fallback
623 		 * in case the high-order page allocation fails, so we don't
624 		 * need to push hard here. Let kcompactd rest in peace.
625 		 */
626 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
627 #endif
628 	}
629 
630 	skb = __netdev_alloc_skb(NULL, size, flags);
631 
632 	if (skb)
633 		goto skb_alloc;
634 
635 	skb = pld_nbuf_pre_alloc(size);
636 
637 	if (!skb) {
638 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
639 				size, func, line);
640 		__qdf_nbuf_start_replenish_timer();
641 		return NULL;
642 	} else {
643 		__qdf_nbuf_stop_replenish_timer();
644 	}
645 
646 skb_alloc:
647 	memset(skb->cb, 0x0, sizeof(skb->cb));
648 
649 	/*
650 	 * The default is for netbuf fragments to be interpreted
651 	 * as wordstreams rather than bytestreams.
652 	 */
653 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
654 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
655 
656 	/*
657 	 * XXX:how about we reserve first then align
658 	 * Align & make sure that the tail & data are adjusted properly
659 	 */
660 
661 	if (align) {
662 		offset = ((unsigned long)skb->data) % align;
663 		if (offset)
664 			skb_reserve(skb, align - offset);
665 	}
666 
667 	/*
668 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
669 	 * pointer
670 	 */
671 	skb_reserve(skb, reserve);
672 	qdf_nbuf_count_inc(skb);
673 
674 	return skb;
675 }
676 #endif
677 qdf_export_symbol(__qdf_nbuf_alloc);
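
/*
 * Worked example (illustrative, not part of the driver) of the alignment
 * handling above: with align = 4 and skb->data ending in ...2, the size is
 * grown by align - 1 = 3 bytes up front, and skb_reserve() then skips
 * align - (data % align) = 2 bytes so the returned data pointer is 4-byte
 * aligned before the caller's 'reserve' headroom is applied.
 */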
678 
679 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
680 					  const char *func, uint32_t line)
681 {
682 	qdf_nbuf_t nbuf;
683 	unsigned long offset;
684 
685 	if (align)
686 		size += (align - 1);
687 
688 	nbuf = alloc_skb(size, GFP_ATOMIC);
689 	if (!nbuf)
690 		goto ret_nbuf;
691 
692 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
693 
694 	skb_reserve(nbuf, reserve);
695 
696 	if (align) {
697 		offset = ((unsigned long)nbuf->data) % align;
698 		if (offset)
699 			skb_reserve(nbuf, align - offset);
700 	}
701 
702 	qdf_nbuf_count_inc(nbuf);
703 
704 ret_nbuf:
705 	return nbuf;
706 }
707 
708 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
709 
710 /**
711  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
712  * @skb: Pointer to network buffer
713  *
714  * Return: none
715  */
716 
717 void __qdf_nbuf_free(struct sk_buff *skb)
718 {
719 	if (pld_nbuf_pre_alloc_free(skb))
720 		return;
721 
722 	qdf_nbuf_frag_count_dec(skb);
723 
724 	qdf_nbuf_count_dec(skb);
725 	if (nbuf_free_cb)
726 		nbuf_free_cb(skb);
727 	else
728 		dev_kfree_skb_any(skb);
729 }
730 
731 qdf_export_symbol(__qdf_nbuf_free);
732 
733 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
734 {
735 	qdf_nbuf_t skb_new = NULL;
736 
737 	skb_new = skb_clone(skb, GFP_ATOMIC);
738 	if (skb_new) {
739 		qdf_nbuf_frag_count_inc(skb_new);
740 		qdf_nbuf_count_inc(skb_new);
741 	}
742 	return skb_new;
743 }
744 
745 qdf_export_symbol(__qdf_nbuf_clone);
746 
747 #ifdef NBUF_MEMORY_DEBUG
748 struct qdf_nbuf_event {
749 	qdf_nbuf_t nbuf;
750 	char func[QDF_MEM_FUNC_NAME_SIZE];
751 	uint32_t line;
752 	enum qdf_nbuf_event_type type;
753 	uint64_t timestamp;
754 	qdf_dma_addr_t iova;
755 };
756 
757 #ifndef QDF_NBUF_HISTORY_SIZE
758 #define QDF_NBUF_HISTORY_SIZE 4096
759 #endif
760 static qdf_atomic_t qdf_nbuf_history_index;
761 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
762 
763 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
764 {
765 	int32_t next = qdf_atomic_inc_return(index);
766 
767 	if (next == size)
768 		qdf_atomic_sub(size, index);
769 
770 	return next % size;
771 }
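
/*
 * Worked example (illustrative, not in the original file): with
 * QDF_NBUF_HISTORY_SIZE == 4096, successive calls return slots 1, 2, ...
 * 4095; the call whose atomic increment reaches 4096 subtracts the size so
 * the counter wraps back toward zero and that call returns slot 0. Under
 * concurrency the counter can briefly exceed 'size', which is why the
 * result is still reduced modulo 'size' on every call.
 */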
772 
773 void
774 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
775 		     enum qdf_nbuf_event_type type)
776 {
777 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
778 						   QDF_NBUF_HISTORY_SIZE);
779 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
780 
781 	if (qdf_atomic_read(&smmu_crashed)) {
782 		g_histroy_add_drop++;
783 		return;
784 	}
785 
786 	event->nbuf = nbuf;
787 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
788 	event->line = line;
789 	event->type = type;
790 	event->timestamp = qdf_get_log_timestamp();
791 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
792 	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
793 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
794 	else
795 		event->iova = 0;
796 }
797 
798 void qdf_set_smmu_fault_state(bool smmu_fault_state)
799 {
800 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
801 	if (!smmu_fault_state)
802 		g_histroy_add_drop = 0;
803 }
804 qdf_export_symbol(qdf_set_smmu_fault_state);
805 #endif /* NBUF_MEMORY_DEBUG */
806 
807 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
808 #define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
809 qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
810 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
811 
812 static void qdf_nbuf_smmu_map_tracking_init(void)
813 {
814 	qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
815 }
816 
817 static void qdf_nbuf_smmu_map_tracking_deinit(void)
818 {
819 	qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
820 }
821 
822 static QDF_STATUS
823 qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
824 {
825 	if (is_initial_mem_debug_disabled)
826 		return QDF_STATUS_SUCCESS;
827 
828 	return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
829 }
830 
831 static void
832 qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
833 {
834 	if (is_initial_mem_debug_disabled)
835 		return;
836 
837 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
838 	qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
839 }
840 
841 void qdf_nbuf_map_check_for_smmu_leaks(void)
842 {
843 	qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
844 }
845 
846 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
847 				   uint8_t hdl,
848 				   uint8_t num_buffers,
849 				   qdf_mem_info_t *info,
850 				   const char *func,
851 				   uint32_t line)
852 {
853 	QDF_STATUS status;
854 
855 	status = qdf_nbuf_track_smmu_map(nbuf, func, line);
856 	if (QDF_IS_STATUS_ERROR(status))
857 		return status;
858 
859 	status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
860 
861 	if (QDF_IS_STATUS_ERROR(status)) {
862 		qdf_nbuf_untrack_smmu_map(nbuf, func, line);
863 	} else {
864 		if (!is_initial_mem_debug_disabled)
865 			qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_MAP);
866 		qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
867 						       info->pa, func, line);
868 	}
869 
870 	return status;
871 }
872 
873 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
874 
875 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
876 				     uint8_t hdl,
877 				     uint8_t num_buffers,
878 				     qdf_mem_info_t *info,
879 				     const char *func,
880 				     uint32_t line)
881 {
882 	QDF_STATUS status;
883 
884 	qdf_nbuf_untrack_smmu_map(nbuf, func, line);
885 	status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
886 	qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
887 						 info->pa, func, line);
888 	return status;
889 }
890 
891 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
892 
893 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
894 						  const char *func,
895 						  uint32_t line)
896 {
897 	char map_func[QDF_TRACKER_FUNC_SIZE];
898 	uint32_t map_line;
899 
900 	if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
901 				&map_func, &map_line))
902 		return;
903 
904 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
905 			   func, line, map_func, map_line);
906 }
907 
908 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
909 {
910 	p_node->smmu_unmap_line_num = 0;
911 	p_node->is_nbuf_smmu_mapped = false;
912 	p_node->smmu_map_line_num = 0;
913 	p_node->smmu_map_func_name[0] = '\0';
914 	p_node->smmu_unmap_func_name[0] = '\0';
915 	p_node->smmu_unmap_iova_addr = 0;
916 	p_node->smmu_unmap_pa_addr = 0;
917 	p_node->smmu_map_iova_addr = 0;
918 	p_node->smmu_map_pa_addr = 0;
919 }
920 #else
921 #ifdef NBUF_MEMORY_DEBUG
922 static void qdf_nbuf_smmu_map_tracking_init(void)
923 {
924 }
925 
926 static void qdf_nbuf_smmu_map_tracking_deinit(void)
927 {
928 }
929 
930 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
931 						  const char *func,
932 						  uint32_t line)
933 {
934 }
935 
936 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
937 {
938 }
939 #endif
940 
941 #ifdef IPA_OFFLOAD
942 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
943 				   uint8_t hdl,
944 				   uint8_t num_buffers,
945 				   qdf_mem_info_t *info,
946 				   const char *func,
947 				   uint32_t line)
948 {
949 	return  __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
950 }
951 
952 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
953 
954 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
955 				     uint8_t hdl,
956 				     uint8_t num_buffers,
957 				     qdf_mem_info_t *info,
958 				     const char *func,
959 				     uint32_t line)
960 {
961 	return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
962 }
963 
964 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
965 #endif
966 #endif
967 
968 #ifdef NBUF_MAP_UNMAP_DEBUG
969 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
970 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
971 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
972 
973 static void qdf_nbuf_map_tracking_init(void)
974 {
975 	qdf_tracker_init(&qdf_nbuf_map_tracker);
976 }
977 
978 static void qdf_nbuf_map_tracking_deinit(void)
979 {
980 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
981 }
982 
983 static QDF_STATUS
984 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
985 {
986 	if (is_initial_mem_debug_disabled)
987 		return QDF_STATUS_SUCCESS;
988 
989 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
990 }
991 
992 static void
993 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
994 {
995 	if (is_initial_mem_debug_disabled)
996 		return;
997 
998 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
999 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
1000 }
1001 
1002 void qdf_nbuf_map_check_for_leaks(void)
1003 {
1004 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
1005 }
1006 
1007 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
1008 			      qdf_nbuf_t buf,
1009 			      qdf_dma_dir_t dir,
1010 			      const char *func,
1011 			      uint32_t line)
1012 {
1013 	QDF_STATUS status;
1014 
1015 	status = qdf_nbuf_track_map(buf, func, line);
1016 	if (QDF_IS_STATUS_ERROR(status))
1017 		return status;
1018 
1019 	status = __qdf_nbuf_map(osdev, buf, dir);
1020 	if (QDF_IS_STATUS_ERROR(status)) {
1021 		qdf_nbuf_untrack_map(buf, func, line);
1022 	} else {
1023 		if (!is_initial_mem_debug_disabled)
1024 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1025 		qdf_net_buf_debug_update_map_node(buf, func, line);
1026 	}
1027 
1028 	return status;
1029 }
1030 
1031 qdf_export_symbol(qdf_nbuf_map_debug);
1032 
1033 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
1034 			  qdf_nbuf_t buf,
1035 			  qdf_dma_dir_t dir,
1036 			  const char *func,
1037 			  uint32_t line)
1038 {
1039 	qdf_nbuf_untrack_map(buf, func, line);
1040 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1041 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1042 }
1043 
1044 qdf_export_symbol(qdf_nbuf_unmap_debug);
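
/*
 * Usage sketch (illustrative, not part of this file): the debug wrappers are
 * meant to be used in matched pairs so the map tracker and the unmap-node
 * bookkeeping stay balanced:
 *
 *	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				    __func__, __LINE__);
 *	if (QDF_IS_STATUS_SUCCESS(status)) {
 *		... DMA ...
 *		qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				     __func__, __LINE__);
 *	}
 *
 * Callers normally reach these through the qdf_nbuf_map()/qdf_nbuf_unmap()
 * macros, which supply func/line automatically when NBUF_MAP_UNMAP_DEBUG is
 * enabled.
 */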
1045 
1046 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
1047 				     qdf_nbuf_t buf,
1048 				     qdf_dma_dir_t dir,
1049 				     const char *func,
1050 				     uint32_t line)
1051 {
1052 	QDF_STATUS status;
1053 
1054 	status = qdf_nbuf_track_map(buf, func, line);
1055 	if (QDF_IS_STATUS_ERROR(status))
1056 		return status;
1057 
1058 	status = __qdf_nbuf_map_single(osdev, buf, dir);
1059 	if (QDF_IS_STATUS_ERROR(status)) {
1060 		qdf_nbuf_untrack_map(buf, func, line);
1061 	} else {
1062 		if (!is_initial_mem_debug_disabled)
1063 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1064 		qdf_net_buf_debug_update_map_node(buf, func, line);
1065 	}
1066 
1067 	return status;
1068 }
1069 
1070 qdf_export_symbol(qdf_nbuf_map_single_debug);
1071 
1072 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
1073 				 qdf_nbuf_t buf,
1074 				 qdf_dma_dir_t dir,
1075 				 const char *func,
1076 				 uint32_t line)
1077 {
1078 	qdf_nbuf_untrack_map(buf, func, line);
1079 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1080 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1081 }
1082 
1083 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
1084 
1085 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
1086 				     qdf_nbuf_t buf,
1087 				     qdf_dma_dir_t dir,
1088 				     int nbytes,
1089 				     const char *func,
1090 				     uint32_t line)
1091 {
1092 	QDF_STATUS status;
1093 
1094 	status = qdf_nbuf_track_map(buf, func, line);
1095 	if (QDF_IS_STATUS_ERROR(status))
1096 		return status;
1097 
1098 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
1099 	if (QDF_IS_STATUS_ERROR(status)) {
1100 		qdf_nbuf_untrack_map(buf, func, line);
1101 	} else {
1102 		if (!is_initial_mem_debug_disabled)
1103 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1104 		qdf_net_buf_debug_update_map_node(buf, func, line);
1105 	}
1106 
1107 	return status;
1108 }
1109 
1110 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
1111 
1112 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
1113 				 qdf_nbuf_t buf,
1114 				 qdf_dma_dir_t dir,
1115 				 int nbytes,
1116 				 const char *func,
1117 				 uint32_t line)
1118 {
1119 	qdf_nbuf_untrack_map(buf, func, line);
1120 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
1121 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1122 }
1123 
1124 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1125 
1126 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1127 					    qdf_nbuf_t buf,
1128 					    qdf_dma_dir_t dir,
1129 					    int nbytes,
1130 					    const char *func,
1131 					    uint32_t line)
1132 {
1133 	QDF_STATUS status;
1134 
1135 	status = qdf_nbuf_track_map(buf, func, line);
1136 	if (QDF_IS_STATUS_ERROR(status))
1137 		return status;
1138 
1139 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1140 	if (QDF_IS_STATUS_ERROR(status)) {
1141 		qdf_nbuf_untrack_map(buf, func, line);
1142 	} else {
1143 		if (!is_initial_mem_debug_disabled)
1144 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1145 		qdf_net_buf_debug_update_map_node(buf, func, line);
1146 	}
1147 
1148 	return status;
1149 }
1150 
1151 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1152 
1153 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
1154 					qdf_nbuf_t buf,
1155 					qdf_dma_dir_t dir,
1156 					int nbytes,
1157 					const char *func,
1158 					uint32_t line)
1159 {
1160 	qdf_nbuf_untrack_map(buf, func, line);
1161 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
1162 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1163 }
1164 
1165 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1166 
1167 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1168 					      qdf_nbuf_t buf,
1169 					      qdf_dma_addr_t phy_addr,
1170 					      qdf_dma_dir_t dir, int nbytes,
1171 					      const char *func, uint32_t line)
1172 {
1173 	qdf_nbuf_untrack_map(buf, func, line);
1174 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1175 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1176 }
1177 
1178 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1179 
1180 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1181 					     const char *func,
1182 					     uint32_t line)
1183 {
1184 	char map_func[QDF_TRACKER_FUNC_SIZE];
1185 	uint32_t map_line;
1186 
1187 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1188 				&map_func, &map_line))
1189 		return;
1190 
1191 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1192 			   func, line, map_func, map_line);
1193 }
1194 #else
1195 static inline void qdf_nbuf_map_tracking_init(void)
1196 {
1197 }
1198 
1199 static inline void qdf_nbuf_map_tracking_deinit(void)
1200 {
1201 }
1202 
1203 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1204 						    const char *func,
1205 						    uint32_t line)
1206 {
1207 }
1208 #endif /* NBUF_MAP_UNMAP_DEBUG */
1209 
1210 /**
1211  * __qdf_nbuf_map() - map a buffer to local bus address space
1212  * @osdev: OS device
1214  * @skb: Pointer to network buffer
1215  * @dir: Direction
1216  *
1217  * Return: QDF_STATUS
1218  */
1219 #ifdef QDF_OS_DEBUG
1220 QDF_STATUS
1221 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1222 {
1223 	struct skb_shared_info *sh = skb_shinfo(skb);
1224 
1225 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1226 			|| (dir == QDF_DMA_FROM_DEVICE));
1227 
1228 	/*
1229 	 * Assume there's only a single fragment.
1230 	 * To support multiple fragments, it would be necessary to change
1231 	 * qdf_nbuf_t to be a separate object that stores meta-info
1232 	 * (including the bus address for each fragment) and a pointer
1233 	 * to the underlying sk_buff.
1234 	 */
1235 	qdf_assert(sh->nr_frags == 0);
1236 
1237 	return __qdf_nbuf_map_single(osdev, skb, dir);
1238 }
1239 qdf_export_symbol(__qdf_nbuf_map);
1240 
1241 #else
1242 QDF_STATUS
1243 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1244 {
1245 	return __qdf_nbuf_map_single(osdev, skb, dir);
1246 }
1247 qdf_export_symbol(__qdf_nbuf_map);
1248 #endif
1249 /**
1250  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1251  * @osdev: OS device
1252  * @skb: Pointer to network buffer
1253  * @dir: dma direction
1254  *
1255  * Return: none
1256  */
1257 void
1258 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1259 			qdf_dma_dir_t dir)
1260 {
1261 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1262 		   || (dir == QDF_DMA_FROM_DEVICE));
1263 
1264 	/*
1265 	 * Assume there's a single fragment.
1266 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1267 	 */
1268 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1269 }
1270 qdf_export_symbol(__qdf_nbuf_unmap);
1271 
1272 /**
1273  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1274  * @osdev: OS device
1275  * @skb: Pointer to network buffer
1276  * @dir: Direction
1277  *
1278  * Return: QDF_STATUS
1279  */
1280 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1281 QDF_STATUS
1282 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1283 {
1284 	qdf_dma_addr_t paddr;
1285 
1286 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1287 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1288 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1289 	return QDF_STATUS_SUCCESS;
1290 }
1291 qdf_export_symbol(__qdf_nbuf_map_single);
1292 #else
1293 QDF_STATUS
1294 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1295 {
1296 	qdf_dma_addr_t paddr;
1297 
1298 	/* assume that the OS only provides a single fragment */
1299 	QDF_NBUF_CB_PADDR(buf) = paddr =
1300 		dma_map_single(osdev->dev, buf->data,
1301 				skb_end_pointer(buf) - buf->data,
1302 				__qdf_dma_dir_to_os(dir));
1303 	__qdf_record_nbuf_nbytes(
1304 		__qdf_nbuf_get_end_offset(buf), dir, true);
1305 	return dma_mapping_error(osdev->dev, paddr)
1306 		? QDF_STATUS_E_FAILURE
1307 		: QDF_STATUS_SUCCESS;
1308 }
1309 qdf_export_symbol(__qdf_nbuf_map_single);
1310 #endif
1311 /**
1312  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1313  * @osdev: OS device
1314  * @skb: Pointer to network buffer
1315  * @dir: Direction
1316  *
1317  * Return: none
1318  */
1319 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1320 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1321 				qdf_dma_dir_t dir)
1322 {
1323 }
1324 #else
1325 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1326 					qdf_dma_dir_t dir)
1327 {
1328 	if (QDF_NBUF_CB_PADDR(buf)) {
1329 		__qdf_record_nbuf_nbytes(
1330 			__qdf_nbuf_get_end_offset(buf), dir, false);
1331 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1332 			skb_end_pointer(buf) - buf->data,
1333 			__qdf_dma_dir_to_os(dir));
1334 	}
1335 }
1336 #endif
1337 qdf_export_symbol(__qdf_nbuf_unmap_single);
1338 
1339 /**
1340  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1341  * @skb: Pointer to network buffer
1342  * @cksum: Pointer to checksum value
1343  *
1344  * Return: QDF_STATUS
1345  */
1346 QDF_STATUS
1347 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1348 {
1349 	switch (cksum->l4_result) {
1350 	case QDF_NBUF_RX_CKSUM_NONE:
1351 		skb->ip_summed = CHECKSUM_NONE;
1352 		break;
1353 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1354 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1355 		break;
1356 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1357 		skb->ip_summed = CHECKSUM_PARTIAL;
1358 		skb->csum = cksum->val;
1359 		break;
1360 	default:
1361 		pr_err("Unknown checksum type\n");
1362 		qdf_assert(0);
1363 		return QDF_STATUS_E_NOSUPPORT;
1364 	}
1365 	return QDF_STATUS_SUCCESS;
1366 }
1367 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
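
/*
 * Usage sketch (illustrative, not part of this file): an RX completion path
 * that learned from the hardware descriptor that the TCP/UDP checksum was
 * already verified could report it as:
 *
 *	qdf_nbuf_rx_cksum_t cksum = {
 *		.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY,
 *	};
 *
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */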
1368 
1369 /**
1370  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1371  * @skb: Pointer to network buffer
1372  *
1373  * Return: TX checksum value
1374  */
1375 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1376 {
1377 	switch (skb->ip_summed) {
1378 	case CHECKSUM_NONE:
1379 		return QDF_NBUF_TX_CKSUM_NONE;
1380 	case CHECKSUM_PARTIAL:
1381 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1382 	case CHECKSUM_COMPLETE:
1383 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1384 	default:
1385 		return QDF_NBUF_TX_CKSUM_NONE;
1386 	}
1387 }
1388 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1389 
1390 /**
1391  * __qdf_nbuf_get_tid() - get tid
1392  * @skb: Pointer to network buffer
1393  *
1394  * Return: tid
1395  */
1396 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1397 {
1398 	return skb->priority;
1399 }
1400 qdf_export_symbol(__qdf_nbuf_get_tid);
1401 
1402 /**
1403  * __qdf_nbuf_set_tid() - set tid
1404  * @skb: Pointer to network buffer
1405  * @tid: TID value to set
1406  * Return: none
1407  */
1408 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1409 {
1410 	skb->priority = tid;
1411 }
1412 qdf_export_symbol(__qdf_nbuf_set_tid);
1413 
1414 /**
1415  * __qdf_nbuf_get_exemption_type() - get exemption type
1416  * @skb: Pointer to network buffer
1417  *
1418  * Return: exemption type
1419  */
1420 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1421 {
1422 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1423 }
1424 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1425 
1426 /**
1427  * __qdf_nbuf_reg_trace_cb() - register trace callback
1428  * @cb_func_ptr: Pointer to trace callback function
1429  *
1430  * Return: none
1431  */
1432 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1433 {
1434 	qdf_trace_update_cb = cb_func_ptr;
1435 }
1436 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1437 
1438 /**
1439  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1440  *              of DHCP packet.
1441  * @data: Pointer to DHCP packet data buffer
1442  *
1443  * This func. returns the subtype of DHCP packet.
1444  *
1445  * Return: subtype of the DHCP packet.
1446  */
1447 enum qdf_proto_subtype
1448 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1449 {
1450 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1451 
1452 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1453 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1454 					QDF_DHCP_OPTION53_LENGTH)) {
1455 
1456 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1457 		case QDF_DHCP_DISCOVER:
1458 			subtype = QDF_PROTO_DHCP_DISCOVER;
1459 			break;
1460 		case QDF_DHCP_REQUEST:
1461 			subtype = QDF_PROTO_DHCP_REQUEST;
1462 			break;
1463 		case QDF_DHCP_OFFER:
1464 			subtype = QDF_PROTO_DHCP_OFFER;
1465 			break;
1466 		case QDF_DHCP_ACK:
1467 			subtype = QDF_PROTO_DHCP_ACK;
1468 			break;
1469 		case QDF_DHCP_NAK:
1470 			subtype = QDF_PROTO_DHCP_NACK;
1471 			break;
1472 		case QDF_DHCP_RELEASE:
1473 			subtype = QDF_PROTO_DHCP_RELEASE;
1474 			break;
1475 		case QDF_DHCP_INFORM:
1476 			subtype = QDF_PROTO_DHCP_INFORM;
1477 			break;
1478 		case QDF_DHCP_DECLINE:
1479 			subtype = QDF_PROTO_DHCP_DECLINE;
1480 			break;
1481 		default:
1482 			break;
1483 		}
1484 	}
1485 
1486 	return subtype;
1487 }
1488 
1489 #define EAPOL_MASK				0x8002
1490 #define EAPOL_M1_BIT_MASK			0x8000
1491 #define EAPOL_M2_BIT_MASK			0x0000
1492 #define EAPOL_M3_BIT_MASK			0x8002
1493 #define EAPOL_M4_BIT_MASK			0x0002
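
/*
 * Illustrative note (not in the original file): key_info is read below as a
 * raw uint16_t, so on a little-endian host the big-endian Key ACK (0x0080)
 * and Secure (0x0200) bits of the EAPOL-Key Key Information field appear
 * byte-swapped as 0x8000 and 0x0002. The masks above then distinguish the
 * 4-way handshake messages: M1 = ACK only, M2 = neither, M3 = ACK + Secure,
 * M4 = Secure only.
 */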
1494 /**
1495  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1496  *            of EAPOL packet.
1497  * @data: Pointer to EAPOL packet data buffer
1498  *
1499  * This func. returns the subtype of EAPOL packet.
1500  *
1501  * Return: subtype of the EAPOL packet.
1502  */
1503 enum qdf_proto_subtype
1504 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1505 {
1506 	uint16_t eapol_key_info;
1507 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1508 	uint16_t mask;
1509 
1510 	eapol_key_info = (uint16_t)(*(uint16_t *)
1511 			(data + EAPOL_KEY_INFO_OFFSET));
1512 
1513 	mask = eapol_key_info & EAPOL_MASK;
1514 
1515 	switch (mask) {
1516 	case EAPOL_M1_BIT_MASK:
1517 		subtype = QDF_PROTO_EAPOL_M1;
1518 		break;
1519 	case EAPOL_M2_BIT_MASK:
1520 		subtype = QDF_PROTO_EAPOL_M2;
1521 		break;
1522 	case EAPOL_M3_BIT_MASK:
1523 		subtype = QDF_PROTO_EAPOL_M3;
1524 		break;
1525 	case EAPOL_M4_BIT_MASK:
1526 		subtype = QDF_PROTO_EAPOL_M4;
1527 		break;
1528 	}
1529 
1530 	return subtype;
1531 }
1532 
1533 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1534 
1535 /**
1536  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1537  *            of ARP packet.
1538  * @data: Pointer to ARP packet data buffer
1539  *
1540  * This func. returns the subtype of ARP packet.
1541  *
1542  * Return: subtype of the ARP packet.
1543  */
1544 enum qdf_proto_subtype
1545 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1546 {
1547 	uint16_t subtype;
1548 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1549 
1550 	subtype = (uint16_t)(*(uint16_t *)
1551 			(data + ARP_SUB_TYPE_OFFSET));
1552 
1553 	switch (QDF_SWAP_U16(subtype)) {
1554 	case ARP_REQUEST:
1555 		proto_subtype = QDF_PROTO_ARP_REQ;
1556 		break;
1557 	case ARP_RESPONSE:
1558 		proto_subtype = QDF_PROTO_ARP_RES;
1559 		break;
1560 	default:
1561 		break;
1562 	}
1563 
1564 	return proto_subtype;
1565 }
1566 
1567 /**
1568  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1569  *            of IPV4 ICMP packet.
1570  * @data: Pointer to IPV4 ICMP packet data buffer
1571  *
1572  * This func. returns the subtype of ICMP packet.
1573  *
1574  * Return: subtype of the ICMP packet.
1575  */
1576 enum qdf_proto_subtype
1577 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1578 {
1579 	uint8_t subtype;
1580 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1581 
1582 	subtype = (uint8_t)(*(uint8_t *)
1583 			(data + ICMP_SUBTYPE_OFFSET));
1584 
1585 	switch (subtype) {
1586 	case ICMP_REQUEST:
1587 		proto_subtype = QDF_PROTO_ICMP_REQ;
1588 		break;
1589 	case ICMP_RESPONSE:
1590 		proto_subtype = QDF_PROTO_ICMP_RES;
1591 		break;
1592 	default:
1593 		break;
1594 	}
1595 
1596 	return proto_subtype;
1597 }
1598 
1599 /**
1600  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1601  *            of IPV6 ICMPV6 packet.
1602  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1603  *
1604  * This func. returns the subtype of ICMPV6 packet.
1605  *
1606  * Return: subtype of the ICMPV6 packet.
1607  */
1608 enum qdf_proto_subtype
1609 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1610 {
1611 	uint8_t subtype;
1612 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1613 
1614 	subtype = (uint8_t)(*(uint8_t *)
1615 			(data + ICMPV6_SUBTYPE_OFFSET));
1616 
1617 	switch (subtype) {
1618 	case ICMPV6_REQUEST:
1619 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1620 		break;
1621 	case ICMPV6_RESPONSE:
1622 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1623 		break;
1624 	case ICMPV6_RS:
1625 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1626 		break;
1627 	case ICMPV6_RA:
1628 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1629 		break;
1630 	case ICMPV6_NS:
1631 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1632 		break;
1633 	case ICMPV6_NA:
1634 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1635 		break;
1636 	default:
1637 		break;
1638 	}
1639 
1640 	return proto_subtype;
1641 }
1642 
1643 /**
1644  * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
1645  * @skb: Buffer
1646  *
1647  * This function checks IPv4 packet is last fragment or not.
1648  * Caller has to call this function for IPv4 packets only.
1649  *
1650  * Return: True if IPv4 packet is last fragment otherwise false
1651  */
1652 bool
1653 __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1654 {
1655 	if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1656 		return true;
1657 
1658 	return false;
1659 }
1660 
1661 /**
1662  * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for IPv4 packet
1663  * @data: pointer to skb payload
1664  * @tos: value of TOS to be set
1665  *
1666  * This func. set the TOS field of IPv4 packet.
1667  *
1668  * Return: None
1669  */
1670 void
1671 __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
1672 {
1673 	*(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
1674 }
1675 
1676 /**
1677  * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of IPv4 packet
1678  * @data: Pointer to skb payload
1679  *
1680  * This func. returns the TOS type of IPv4 packet.
1681  *
1682  * Return: TOS type of IPv4 packet.
1683  */
1684 uint8_t
1685 __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1686 {
1687 	uint8_t tos;
1688 
1689 	tos = (uint8_t)(*(uint8_t *)(data +
1690 			QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1691 	return tos;
1692 }
1693 
1694 /**
1695  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1696  *            of IPV4 packet.
1697  * @data: Pointer to IPV4 packet data buffer
1698  *
1699  * This func. returns the proto type of IPV4 packet.
1700  *
1701  * Return: proto type of IPV4 packet.
1702  */
1703 uint8_t
1704 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1705 {
1706 	uint8_t proto_type;
1707 
1708 	proto_type = (uint8_t)(*(uint8_t *)(data +
1709 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1710 	return proto_type;
1711 }
1712 
1713 /**
1714  * __qdf_nbuf_data_get_ipv6_tc() - get the TC field
1715  *                                 of IPv6 packet.
1716  * @data: Pointer to IPv6 packet data buffer
1717  *
1718  * This func. returns the TC field of IPv6 packet.
1719  *
1720  * Return: traffic classification of IPv6 packet.
1721  */
1722 uint8_t
1723 __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
1724 {
1725 	struct ipv6hdr *hdr;
1726 
1727 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1728 	return ip6_tclass(ip6_flowinfo(hdr));
1729 }
1730 
1731 /**
1732  * __qdf_nbuf_data_set_ipv6_tc() - set the TC field
1733  *                                 of IPv6 packet.
1734  * @data: Pointer to skb payload
1735  * @tc: value to set to IPv6 header TC field
1736  *
1737  * This func. set the TC field of IPv6 header.
1738  *
1739  * Return: None
1740  */
1741 void
1742 __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
1743 {
1744 	struct ipv6hdr *hdr;
1745 
1746 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1747 	ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
1748 }
1749 
1750 /**
1751  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1752  *            of IPV6 packet.
1753  * @data: Pointer to IPV6 packet data buffer
1754  *
1755  * This func. returns the proto type of IPV6 packet.
1756  *
1757  * Return: proto type of IPV6 packet.
1758  */
1759 uint8_t
1760 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1761 {
1762 	uint8_t proto_type;
1763 
1764 	proto_type = (uint8_t)(*(uint8_t *)(data +
1765 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1766 	return proto_type;
1767 }
1768 
1769 /**
1770  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1771  * @data: Pointer to network data
1772  *
1773  * This api is for Tx packets.
1774  *
1775  * Return: true if packet is ipv4 packet
1776  *	   false otherwise
1777  */
1778 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1779 {
1780 	uint16_t ether_type;
1781 
1782 	ether_type = (uint16_t)(*(uint16_t *)(data +
1783 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1784 
1785 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1786 		return true;
1787 	else
1788 		return false;
1789 }
1790 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1791 
1792 /**
1793  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1794  * @data: Pointer to network data buffer
1795  *
1796  * This api is for ipv4 packet.
1797  *
1798  * Return: true if packet is DHCP packet
1799  *	   false otherwise
1800  */
1801 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1802 {
1803 	uint16_t sport;
1804 	uint16_t dport;
1805 	uint8_t ipv4_offset;
1806 	uint8_t ipv4_hdr_len;
1807 	struct iphdr *iphdr;
1808 
1809 	if (__qdf_nbuf_get_ether_type(data) !=
1810 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1811 		return false;
1812 
1813 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1814 	iphdr = (struct iphdr *)(data + ipv4_offset);
1815 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1816 
1817 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1818 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1819 			      sizeof(uint16_t));
1820 
1821 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1822 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1823 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1824 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1825 		return true;
1826 	else
1827 		return false;
1828 }
1829 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
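
/*
 * Usage sketch (illustrative, not part of this file): because the check above
 * derives the IP offset and IHL from the frame itself, it works for untagged,
 * 802.1Q and 802.1AD tagged frames alike, e.g. in a TX classifier:
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(qdf_nbuf_data(nbuf)))
 *		... treat the frame as DHCP (e.g. queue it on a reliable
 *		    TID or log it) ...
 */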
1830 
1831 /**
1832  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1833  * @data: Pointer to network data buffer
1834  *
1835  * This api is for ipv4 packet.
1836  *
1837  * Return: true if packet is EAPOL packet
1838  *	   false otherwise.
1839  */
1840 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1841 {
1842 	uint16_t ether_type;
1843 
1844 	ether_type = __qdf_nbuf_get_ether_type(data);
1845 
1846 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1847 		return true;
1848 	else
1849 		return false;
1850 }
1851 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1852 
1853 /**
1854  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1855  * @skb: Pointer to network buffer
1856  *
1857  * This api is for ipv4 packet.
1858  *
1859  * Return: true if packet is WAPI packet
1860  *	   false otherwise.
1861  */
1862 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1863 {
1864 	uint16_t ether_type;
1865 
1866 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1867 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1868 
1869 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1870 		return true;
1871 	else
1872 		return false;
1873 }
1874 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1875 
1876 /**
1877  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is a igmp packet
1878  * @data: Pointer to network data buffer
1879  *
1880  * This api is for ipv4 packet.
1881  *
1882  * Return: true if packet is igmp packet
1883  *	   false otherwise.
1884  */
1885 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1886 {
1887 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1888 		uint8_t pkt_type;
1889 
1890 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1891 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1892 
1893 		if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
1894 			return true;
1895 	}
1896 	return false;
1897 }
1898 
1899 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
1900 
1901 /**
1902  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is a igmp packet
1903  * @data: Pointer to network data buffer
1904  *
1905  * This api is for ipv6 packet.
1906  *
1907  * Return: true if packet is igmp packet
1908  *	   false otherwise.
1909  */
1910 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
1911 {
1912 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1913 		uint8_t pkt_type;
1914 		uint8_t next_hdr;
1915 
1916 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1917 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1918 		next_hdr = (uint8_t)(*(uint8_t *)(data +
1919 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
1920 
1921 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1922 			return true;
1923 		if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
1924 		    (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
1925 			return true;
1926 	}
1927 	return false;
1928 }
1929 
1930 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
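
/*
 * Illustrative note (not in the original file): MLD, the IPv6 counterpart of
 * IGMP, is carried in ICMPv6 and, per RFC 2710/3810, is normally sent with a
 * Hop-by-Hop options header holding the Router Alert option. That is why the
 * check above accepts either ICMPv6 directly or a Hop-by-Hop header whose
 * Next Header field is ICMPv6.
 */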
1931 
1932 /**
1933  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1934  * @skb: Pointer to network buffer
1935  *
1936  * This api is for ipv4 packet.
1937  *
1938  * Return: true if packet is tdls packet
1939  *	   false otherwise.
1940  */
1941 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1942 {
1943 	uint16_t ether_type;
1944 
1945 	ether_type = *(uint16_t *)(skb->data +
1946 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1947 
1948 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1949 		return true;
1950 	else
1951 		return false;
1952 }
1953 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1954 
1955 /**
1956  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1957  * @data: Pointer to network data buffer
1958  *
1959  * This api is for ipv4 packet.
1960  *
1961  * Return: true if packet is ARP packet
1962  *	   false otherwise.
1963  */
1964 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1965 {
1966 	uint16_t ether_type;
1967 
1968 	ether_type = __qdf_nbuf_get_ether_type(data);
1969 
1970 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1971 		return true;
1972 	else
1973 		return false;
1974 }
1975 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1976 
1977 /**
1978  * __qdf_nbuf_data_is_arp_req() - check if skb data is an ARP request
1979  * @data: Pointer to network data buffer
1980  *
1981  * This api is for ipv4 packet.
1982  *
1983  * Return: true if packet is ARP request
1984  *	   false otherwise.
1985  */
1986 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1987 {
1988 	uint16_t op_code;
1989 
1990 	op_code = (uint16_t)(*(uint16_t *)(data +
1991 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1992 
1993 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1994 		return true;
1995 	return false;
1996 }
1997 
1998 /**
1999  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an ARP response
2000  * @data: Pointer to network data buffer
2001  *
2002  * This api is for ipv4 packet.
2003  *
2004  * Return: true if packet is ARP response
2005  *	   false otherwise.
2006  */
2007 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
2008 {
2009 	uint16_t op_code;
2010 
2011 	op_code = (uint16_t)(*(uint16_t *)(data +
2012 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2013 
2014 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
2015 		return true;
2016 	return false;
2017 }
2018 
2019 /**
2020  * __qdf_nbuf_get_arp_src_ip() - get ARP src IP
2021  * @data: Pointer to network data buffer
2022  *
2023  * This api is for ipv4 packet.
2024  *
2025  * Return: ARP packet source IP value.
2026  */
2027 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
2028 {
2029 	uint32_t src_ip;
2030 
2031 	src_ip = (uint32_t)(*(uint32_t *)(data +
2032 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
2033 
2034 	return src_ip;
2035 }
2036 
2037 /**
2038  * __qdf_nbuf_get_arp_tgt_ip() - get ARP target IP
2039  * @data: Pointer to network data buffer
2040  *
2041  * This api is for ipv4 packet.
2042  *
2043  * Return: ARP packet target IP value.
2044  */
2045 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
2046 {
2047 	uint32_t tgt_ip;
2048 
2049 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2050 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
2051 
2052 	return tgt_ip;
2053 }
2054 
2055 /**
2056  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
2057  * @data: Pointer to network data buffer
2058  * @len: length to copy
2059  *
2060  * This api is for dns domain name
2061  *
2062  * Return: dns domain name.
2063  */
2064 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
2065 {
2066 	uint8_t *domain_name;
2067 
2068 	domain_name = (uint8_t *)
2069 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
2070 	return domain_name;
2071 }
2072 
2073 
2074 /**
2075  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
2076  * @data: Pointer to network data buffer
2077  *
2078  * This api is for dns query packet.
2079  *
2080  * Return: true if packet is dns query packet.
2081  *	   false otherwise.
2082  */
2083 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
2084 {
2085 	uint16_t op_code;
2086 	uint16_t tgt_port;
2087 
2088 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2089 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
2090 	/* Standard DNS query always happens on Dest Port 53. */
2091 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2092 		op_code = (uint16_t)(*(uint16_t *)(data +
2093 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2094 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2095 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
2096 			return true;
2097 	}
2098 	return false;
2099 }
2100 
2101 /**
2102  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
2103  * @data: Pointer to network data buffer
2104  *
2105  * This api is for dns query response.
2106  *
2107  * Return: true if packet is dns response packet.
2108  *	   false otherwise.
2109  */
2110 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
2111 {
2112 	uint16_t op_code;
2113 	uint16_t src_port;
2114 
2115 	src_port = (uint16_t)(*(uint16_t *)(data +
2116 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
2117 	/* Standard DNS response always comes on Src Port 53. */
2118 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2119 		op_code = (uint16_t)(*(uint16_t *)(data +
2120 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2121 
2122 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2123 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
2124 			return true;
2125 	}
2126 	return false;
2127 }
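/*
 * A worked example of the DNS classification above, assuming that the opcode
 * bitmap masks the QR and OPCODE bits of the DNS flags word (0xF800) and that
 * the standard query/response constants are 0x0000 and 0x8000; these values
 * are assumptions for illustration, not taken from the QDF headers:
 *
 *	uint16_t flags = QDF_SWAP_U16(*(uint16_t *)(data + flags_offset));
 *
 *	(flags & 0xF800) == 0x0000  ->  QR=0, OPCODE=0: standard query
 *	(flags & 0xF800) == 0x8000  ->  QR=1, OPCODE=0: standard response
 *
 * flags_offset above stands in for QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET.
 */
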
2128 
2129 /**
2130  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
2131  * @data: Pointer to network data buffer
2132  *
2133  * This api is to check if the packet is tcp fin.
2134  *
2135  * Return: true if packet is tcp fin packet.
2136  *         false otherwise.
2137  */
2138 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
2139 {
2140 	uint8_t op_code;
2141 
2142 	op_code = (uint8_t)(*(uint8_t *)(data +
2143 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2144 
2145 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
2146 		return true;
2147 
2148 	return false;
2149 }
2150 
2151 /**
2152  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
2153  * @data: Pointer to network data buffer
2154  *
2155  * This api is to check if the tcp packet is fin ack.
2156  *
2157  * Return: true if packet is tcp fin ack packet.
2158  *         false otherwise.
2159  */
2160 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
2161 {
2162 	uint8_t op_code;
2163 
2164 	op_code = (uint8_t)(*(uint8_t *)(data +
2165 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2166 
2167 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2168 		return true;
2169 
2170 	return false;
2171 }
2172 
2173 /**
2174  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
2175  * @data: Pointer to network data buffer
2176  *
2177  * This api is for tcp syn packet.
2178  *
2179  * Return: true if packet is tcp syn packet.
2180  *	   false otherwise.
2181  */
2182 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2183 {
2184 	uint8_t op_code;
2185 
2186 	op_code = (uint8_t)(*(uint8_t *)(data +
2187 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2188 
2189 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2190 		return true;
2191 	return false;
2192 }
2193 
2194 /**
2195  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
2196  * @data: Pointer to network data buffer
2197  *
2198  * This api is for tcp syn ack packet.
2199  *
2200  * Return: true if packet is tcp syn ack packet.
2201  *	   false otherwise.
2202  */
2203 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2204 {
2205 	uint8_t op_code;
2206 
2207 	op_code = (uint8_t)(*(uint8_t *)(data +
2208 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2209 
2210 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2211 		return true;
2212 	return false;
2213 }
2214 
2215 /**
2216  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
2217  * @data: Pointer to network data buffer
2218  *
2219  * This api is to check if the tcp packet is rst.
2220  *
2221  * Return: true if packet is tcp rst packet.
2222  *         false otherwise.
2223  */
2224 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2225 {
2226 	uint8_t op_code;
2227 
2228 	op_code = (uint8_t)(*(uint8_t *)(data +
2229 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2230 
2231 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2232 		return true;
2233 
2234 	return false;
2235 }
2236 
2237 /**
2238  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
2239  * @data: Pointer to network data buffer
2240  *
2241  * This api is for tcp ack packet.
2242  *
2243  * Return: true if packet is tcp ack packet.
2244  *	   false otherwise.
2245  */
2246 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2247 {
2248 	uint8_t op_code;
2249 
2250 	op_code = (uint8_t)(*(uint8_t *)(data +
2251 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2252 
2253 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2254 		return true;
2255 	return false;
2256 }
2257 
2258 /**
2259  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
2260  * @data: Pointer to network data buffer
2261  *
2262  * This api is for tcp packet.
2263  *
2264  * Return: tcp source port value.
2265  */
2266 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2267 {
2268 	uint16_t src_port;
2269 
2270 	src_port = (uint16_t)(*(uint16_t *)(data +
2271 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2272 
2273 	return src_port;
2274 }
2275 
2276 /**
2277  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
2278  * @data: Pointer to network data buffer
2279  *
2280  * This api is for tcp packet.
2281  *
2282  * Return: tcp destination port value.
2283  */
2284 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2285 {
2286 	uint16_t tgt_port;
2287 
2288 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2289 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2290 
2291 	return tgt_port;
2292 }
2293 
2294 /**
2295  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an ICMPv4 request
2296  * @data: Pointer to network data buffer
2297  *
2298  * This api is for ipv4 req packet.
2299  *
2300  * Return: true if packet is icmpv4 request
2301  *	   false otherwise.
2302  */
2303 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2304 {
2305 	uint8_t op_code;
2306 
2307 	op_code = (uint8_t)(*(uint8_t *)(data +
2308 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2309 
2310 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2311 		return true;
2312 	return false;
2313 }
2314 
2315 /**
2316  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an ICMPv4 response
2317  * @data: Pointer to network data buffer
2318  *
2319  * This api is for ipv4 res packet.
2320  *
2321  * Return: true if packet is icmpv4 response
2322  *	   false otherwise.
2323  */
2324 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2325 {
2326 	uint8_t op_code;
2327 
2328 	op_code = (uint8_t)(*(uint8_t *)(data +
2329 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2330 
2331 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2332 		return true;
2333 	return false;
2334 }
2335 
2336 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2337 {
2338 	uint8_t op_code;
2339 
2340 	op_code = (uint8_t)(*(uint8_t *)(data +
2341 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2342 
2343 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2344 		return true;
2345 	return false;
2346 }
2347 
2348 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2349 
2350 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2351 {
2352 	uint8_t subtype;
2353 
2354 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2355 
2356 	if (subtype == ICMPV6_REDIRECT)
2357 		return true;
2358 	return false;
2359 }
2360 
2361 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2362 
2363 /**
2364  * __qdf_nbuf_get_icmpv4_src_ip() - get ICMPv4 src IP
2365  * @data: Pointer to network data buffer
2366  *
2367  * This api is for ipv4 packet.
2368  *
2369  * Return: icmpv4 packet source IP value.
2370  */
2371 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2372 {
2373 	uint32_t src_ip;
2374 
2375 	src_ip = (uint32_t)(*(uint32_t *)(data +
2376 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2377 
2378 	return src_ip;
2379 }
2380 
2381 /**
2382  * __qdf_nbuf_get_icmpv4_tgt_ip() - get ICMPv4 target IP
2383  * @data: Pointer to network data buffer
2384  *
2385  * This api is for ipv4 packet.
2386  *
2387  * Return: icmpv4 packet target IP value.
2388  */
2389 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2390 {
2391 	uint32_t tgt_ip;
2392 
2393 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2394 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2395 
2396 	return tgt_ip;
2397 }
2398 
2399 
2400 /**
2401  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
2402  * @data: Pointer to IPV6 packet data buffer
2403  *
2404  * This func. checks whether it is a IPV6 packet or not.
2405  *
2406  * Return: TRUE if it is a IPV6 packet
2407  *         FALSE if not
2408  */
2409 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2410 {
2411 	uint16_t ether_type;
2412 
2413 	ether_type = (uint16_t)(*(uint16_t *)(data +
2414 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2415 
2416 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2417 		return true;
2418 	else
2419 		return false;
2420 }
2421 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2422 
2423 /**
2424  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2425  * @data: Pointer to network data buffer
2426  *
2427  * This api is for ipv6 packet.
2428  *
2429  * Return: true if packet is DHCP packet
2430  *	   false otherwise
2431  */
2432 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2433 {
2434 	uint16_t sport;
2435 	uint16_t dport;
2436 	uint8_t ipv6_offset;
2437 
2438 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2439 		return false;
2440 
2441 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2442 	sport = *(uint16_t *)(data + ipv6_offset +
2443 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2444 	dport = *(uint16_t *)(data + ipv6_offset +
2445 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2446 			      sizeof(uint16_t));
2447 
2448 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2449 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2450 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2451 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2452 		return true;
2453 	else
2454 		return false;
2455 }
2456 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2457 
2458 /**
2459  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2460  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is an MDNS packet
2461  *
2462  * This api is for ipv6 packet.
2463  *
2464  * Return: true if packet is MDNS packet
2465  *	   false otherwise
2466  */
2467 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2468 {
2469 	uint16_t sport;
2470 	uint16_t dport;
2471 
2472 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2473 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2474 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2475 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2476 					sizeof(uint16_t));
2477 
2478 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2479 	    dport == sport)
2480 		return true;
2481 	else
2482 		return false;
2483 }
2484 
2485 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2486 
2487 /**
2488  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2489  * @data: Pointer to IPV4 packet data buffer
2490  *
2491  * This func. checks whether it is a IPV4 multicast packet or not.
2492  *
2493  * Return: TRUE if it is a IPV4 multicast packet
2494  *         FALSE if not
2495  */
2496 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2497 {
2498 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2499 		uint32_t *dst_addr =
2500 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2501 
2502 		/*
2503 		 * Check the high nibble of the first octet of the destination
2504 		 * address; 0xE indicates an IPv4 multicast (224.0.0.0/4) address.
2505 		 */
2506 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2507 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2508 			return true;
2509 		else
2510 			return false;
2511 	} else
2512 		return false;
2513 }
2514 
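/*
 * Worked example for the IPv4 multicast check above: the SSDP group address
 * 239.255.255.250 starts with the octet 0xEF, whose high nibble is 0xE, so it
 * falls inside 224.0.0.0/4 and is classified as multicast; a unicast
 * destination such as 192.168.1.10 starts with 0xC0 and is not. A minimal
 * sketch, assuming the mask/value macros isolate that high nibble:
 *
 *	uint8_t first_octet = data[QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET];
 *	bool is_mcast = ((first_octet & 0xF0) == 0xE0);
 */
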
2515 /**
2516  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2517  * @data: Pointer to IPV6 packet data buffer
2518  *
2519  * This func. checks whether it is a IPV6 multicast packet or not.
2520  *
2521  * Return: TRUE if it is a IPV6 multicast packet
2522  *         FALSE if not
2523  */
2524 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2525 {
2526 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2527 		uint16_t *dst_addr;
2528 
2529 		dst_addr = (uint16_t *)
2530 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2531 
2532 		/*
2533 		 * Check the leading 16 bits of the destination address; 0xFF00
2534 		 * indicates an IPv6 multicast packet.
2535 		 */
2536 		if (*dst_addr ==
2537 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2538 			return true;
2539 		else
2540 			return false;
2541 	} else
2542 		return false;
2543 }
2544 
2545 /**
2546  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2547  * @data: Pointer to IPV4 ICMP packet data buffer
2548  *
2549  * This func. checks whether it is a ICMP packet or not.
2550  *
2551  * Return: TRUE if it is a ICMP packet
2552  *         FALSE if not
2553  */
2554 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2555 {
2556 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2557 		uint8_t pkt_type;
2558 
2559 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2560 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2561 
2562 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2563 			return true;
2564 		else
2565 			return false;
2566 	} else
2567 		return false;
2568 }
2569 
2570 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2571 
2572 /**
2573  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2574  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2575  *
2576  * This func. checks whether it is a ICMPV6 packet or not.
2577  *
2578  * Return: TRUE if it is a ICMPV6 packet
2579  *         FALSE if not
2580  */
2581 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2582 {
2583 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2584 		uint8_t pkt_type;
2585 
2586 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2587 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2588 
2589 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2590 			return true;
2591 		else
2592 			return false;
2593 	} else
2594 		return false;
2595 }
2596 
2597 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2598 
2599 /**
2600  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2601  * @data: Pointer to IPV4 UDP packet data buffer
2602  *
2603  * This func. checks whether it is a IPV4 UDP packet or not.
2604  *
2605  * Return: TRUE if it is a IPV4 UDP packet
2606  *         FALSE if not
2607  */
2608 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2609 {
2610 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2611 		uint8_t pkt_type;
2612 
2613 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2614 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2615 
2616 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2617 			return true;
2618 		else
2619 			return false;
2620 	} else
2621 		return false;
2622 }
2623 
2624 /**
2625  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2626  * @data: Pointer to IPV4 TCP packet data buffer
2627  *
2628  * This func. checks whether it is a IPV4 TCP packet or not.
2629  *
2630  * Return: TRUE if it is a IPV4 TCP packet
2631  *         FALSE if not
2632  */
2633 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2634 {
2635 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2636 		uint8_t pkt_type;
2637 
2638 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2639 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2640 
2641 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2642 			return true;
2643 		else
2644 			return false;
2645 	} else
2646 		return false;
2647 }
2648 
2649 /**
2650  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2651  * @data: Pointer to IPV6 UDP packet data buffer
2652  *
2653  * This func. checks whether it is a IPV6 UDP packet or not.
2654  *
2655  * Return: TRUE if it is a IPV6 UDP packet
2656  *         FALSE if not
2657  */
2658 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2659 {
2660 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2661 		uint8_t pkt_type;
2662 
2663 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2664 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2665 
2666 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2667 			return true;
2668 		else
2669 			return false;
2670 	} else
2671 		return false;
2672 }
2673 
2674 /**
2675  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2676  * @data: Pointer to IPV6 TCP packet data buffer
2677  *
2678  * This func. checks whether it is a IPV6 TCP packet or not.
2679  *
2680  * Return: TRUE if it is a IPV6 TCP packet
2681  *         FALSE if not
2682  */
2683 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2684 {
2685 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2686 		uint8_t pkt_type;
2687 
2688 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2689 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2690 
2691 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2692 			return true;
2693 		else
2694 			return false;
2695 	} else
2696 		return false;
2697 }
2698 
2699 /**
2700  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2701  * @nbuf: sk buff
2702  *
2703  * Return: true if packet is broadcast
2704  *	   false otherwise
2705  */
2706 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2707 {
2708 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2709 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2710 }
2711 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2712 
2713 /**
2714  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
2715  * @nbuf: sk buff
2716  *
2717  * Return: true if packet is multicast replay
2718  *	   false otherwise
2719  */
2720 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2721 {
2722 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2723 
2724 	if (unlikely(nbuf->pkt_type == PACKET_MULTICAST)) {
2725 		if (unlikely(ether_addr_equal(eh->h_source,
2726 					      nbuf->dev->dev_addr)))
2727 			return true;
2728 	}
2729 	return false;
2730 }
2731 
2732 /**
2733  * __qdf_nbuf_is_arp_local() - check if an ARP request targets a local IP
2734  * @skb: pointer to sk_buff
2735  *
2736  * Return: true if the ARP request is for a local address, false otherwise.
2737  */
2738 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
2739 {
2740 	struct arphdr *arp;
2741 	struct in_ifaddr **ifap = NULL;
2742 	struct in_ifaddr *ifa = NULL;
2743 	struct in_device *in_dev;
2744 	unsigned char *arp_ptr;
2745 	__be32 tip;
2746 
2747 	arp = (struct arphdr *)skb->data;
2748 	if (arp->ar_op == htons(ARPOP_REQUEST)) {
2749 		/* if fail to acquire rtnl lock, assume it's local arp */
2750 		if (!rtnl_trylock())
2751 			return true;
2752 
2753 		in_dev = __in_dev_get_rtnl(skb->dev);
2754 		if (in_dev) {
2755 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
2756 				ifap = &ifa->ifa_next) {
2757 				if (!strcmp(skb->dev->name, ifa->ifa_label))
2758 					break;
2759 			}
2760 		}
2761 
2762 		if (ifa && ifa->ifa_local) {
2763 			arp_ptr = (unsigned char *)(arp + 1);
2764 			arp_ptr += (skb->dev->addr_len + 4 +
2765 					skb->dev->addr_len);
2766 			memcpy(&tip, arp_ptr, 4);
2767 			qdf_debug("ARP packet: local IP: %x dest IP: %x",
2768 				  ifa->ifa_local, tip);
2769 			if (ifa->ifa_local == tip) {
2770 				rtnl_unlock();
2771 				return true;
2772 			}
2773 		}
2774 		rtnl_unlock();
2775 	}
2776 
2777 	return false;
2778 }
2779 
2780 #ifdef NBUF_MEMORY_DEBUG
2781 
2782 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2783 
2784 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2785 static struct kmem_cache *nbuf_tracking_cache;
2786 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2787 static spinlock_t qdf_net_buf_track_free_list_lock;
2788 static uint32_t qdf_net_buf_track_free_list_count;
2789 static uint32_t qdf_net_buf_track_used_list_count;
2790 static uint32_t qdf_net_buf_track_max_used;
2791 static uint32_t qdf_net_buf_track_max_free;
2792 static uint32_t qdf_net_buf_track_max_allocated;
2793 static uint32_t qdf_net_buf_track_fail_count;
2794 
2795 /**
2796  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2797  *
2798  * tracks the max number of network buffers that the wlan driver was tracking
2799  * at any one time.
2800  *
2801  * Return: none
2802  */
2803 static inline void update_max_used(void)
2804 {
2805 	int sum;
2806 
2807 	if (qdf_net_buf_track_max_used <
2808 	    qdf_net_buf_track_used_list_count)
2809 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2810 	sum = qdf_net_buf_track_free_list_count +
2811 		qdf_net_buf_track_used_list_count;
2812 	if (qdf_net_buf_track_max_allocated < sum)
2813 		qdf_net_buf_track_max_allocated = sum;
2814 }
2815 
2816 /**
2817  * update_max_free() - update qdf_net_buf_track_free_list_count
2818  *
2819  * tracks the max number of tracking buffers kept in the freelist.
2820  *
2821  * Return: none
2822  */
2823 static inline void update_max_free(void)
2824 {
2825 	if (qdf_net_buf_track_max_free <
2826 	    qdf_net_buf_track_free_list_count)
2827 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2828 }
2829 
2830 /**
2831  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2832  *
2833  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2834  * This function also adds flexibility to adjust the allocation and freelist
2835  * schemes.
2836  *
2837  * Return: a pointer to an unused QDF_NBUF_TRACK structure; may not be zeroed.
2838  */
2839 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2840 {
2841 	int flags = GFP_KERNEL;
2842 	unsigned long irq_flag;
2843 	QDF_NBUF_TRACK *new_node = NULL;
2844 
2845 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2846 	qdf_net_buf_track_used_list_count++;
2847 	if (qdf_net_buf_track_free_list) {
2848 		new_node = qdf_net_buf_track_free_list;
2849 		qdf_net_buf_track_free_list =
2850 			qdf_net_buf_track_free_list->p_next;
2851 		qdf_net_buf_track_free_list_count--;
2852 	}
2853 	update_max_used();
2854 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2855 
2856 	if (new_node)
2857 		return new_node;
2858 
2859 	if (in_interrupt() || irqs_disabled() || in_atomic())
2860 		flags = GFP_ATOMIC;
2861 
2862 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2863 }
2864 
2865 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2866 #define FREEQ_POOLSIZE 2048
2867 
2868 /**
2869  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2870  *
2871  * Matches calls to qdf_nbuf_track_alloc.
2872  * Either frees the tracking cookie to kernel or an internal
2873  * freelist based on the size of the freelist.
2874  *
2875  * Return: none
2876  */
2877 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2878 {
2879 	unsigned long irq_flag;
2880 
2881 	if (!node)
2882 		return;
2883 
2884 	/* Try to shrink the freelist when free_list_count exceeds
2885 	 * FREEQ_POOLSIZE, but only when the freelist is more than twice the
2886 	 * number of nbufs in use. If the driver stalls in a consistently
2887 	 * bursty fashion, this keeps 3/4 of the allocations on the free list
2888 	 * while still allowing the system to recover memory as less frantic
2889 	 * traffic occurs.
2890 	 */
2891 
2892 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2893 
2894 	qdf_net_buf_track_used_list_count--;
2895 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2896 	   (qdf_net_buf_track_free_list_count >
2897 	    qdf_net_buf_track_used_list_count << 1)) {
2898 		kmem_cache_free(nbuf_tracking_cache, node);
2899 	} else {
2900 		node->p_next = qdf_net_buf_track_free_list;
2901 		qdf_net_buf_track_free_list = node;
2902 		qdf_net_buf_track_free_list_count++;
2903 	}
2904 	update_max_free();
2905 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2906 }
2907 
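/*
 * Worked example of the shrink heuristic in qdf_nbuf_track_free(): with
 * FREEQ_POOLSIZE = 2048, a node freed while free_list_count = 5000 and
 * used_list_count = 2000 is returned to the kmem_cache (5000 > 2048 and
 * 5000 > 2000 << 1), whereas the same node freed while used_list_count =
 * 3000 is pushed onto the freelist (5000 < 6000), keeping the pool warm for
 * the next traffic burst.
 */
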
2908 /**
2909  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2910  *
2911  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2912  * the freelist first makes it performant for the first iperf udp burst
2913  * as well as steady state.
2914  *
2915  * Return: None
2916  */
2917 static void qdf_nbuf_track_prefill(void)
2918 {
2919 	int i;
2920 	QDF_NBUF_TRACK *node, *head;
2921 
2922 	/* prepopulate the freelist */
2923 	head = NULL;
2924 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2925 		node = qdf_nbuf_track_alloc();
2926 		if (!node)
2927 			continue;
2928 		node->p_next = head;
2929 		head = node;
2930 	}
2931 	while (head) {
2932 		node = head->p_next;
2933 		qdf_nbuf_track_free(head);
2934 		head = node;
2935 	}
2936 
2937 	/* prefilled buffers should not count as used */
2938 	qdf_net_buf_track_max_used = 0;
2939 }
2940 
2941 /**
2942  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2943  *
2944  * This initializes the memory manager for the nbuf tracking cookies.  Because
2945  * these cookies are all the same size and only used in this feature, we can
2946  * use a kmem_cache to provide tracking as well as to speed up allocations.
2947  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2948  * features) a freelist is prepopulated here.
2949  *
2950  * Return: None
2951  */
2952 static void qdf_nbuf_track_memory_manager_create(void)
2953 {
2954 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2955 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2956 						sizeof(QDF_NBUF_TRACK),
2957 						0, 0, NULL);
2958 
2959 	qdf_nbuf_track_prefill();
2960 }
2961 
2962 /**
2963  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2964  *
2965  * Empty the freelist and print out usage statistics when it is no longer
2966  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2967  * any nbuf tracking cookies were leaked.
2968  *
2969  * Return: None
2970  */
2971 static void qdf_nbuf_track_memory_manager_destroy(void)
2972 {
2973 	QDF_NBUF_TRACK *node, *tmp;
2974 	unsigned long irq_flag;
2975 
2976 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2977 	node = qdf_net_buf_track_free_list;
2978 
2979 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2980 		qdf_print("%s: unexpectedly large max_used count %d",
2981 			  __func__, qdf_net_buf_track_max_used);
2982 
2983 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2984 		qdf_print("%s: %d unused trackers were allocated",
2985 			  __func__,
2986 			  qdf_net_buf_track_max_allocated -
2987 			  qdf_net_buf_track_max_used);
2988 
2989 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2990 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2991 		qdf_print("%s: check freelist shrinking functionality",
2992 			  __func__);
2993 
2994 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2995 		  "%s: %d residual freelist size",
2996 		  __func__, qdf_net_buf_track_free_list_count);
2997 
2998 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2999 		  "%s: %d max freelist size observed",
3000 		  __func__, qdf_net_buf_track_max_free);
3001 
3002 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3003 		  "%s: %d max buffers used observed",
3004 		  __func__, qdf_net_buf_track_max_used);
3005 
3006 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3007 		  "%s: %d max buffers allocated observed",
3008 		  __func__, qdf_net_buf_track_max_allocated);
3009 
3010 	while (node) {
3011 		tmp = node;
3012 		node = node->p_next;
3013 		kmem_cache_free(nbuf_tracking_cache, tmp);
3014 		qdf_net_buf_track_free_list_count--;
3015 	}
3016 
3017 	if (qdf_net_buf_track_free_list_count != 0)
3018 		qdf_info("%d unfreed tracking memory lost in freelist",
3019 			 qdf_net_buf_track_free_list_count);
3020 
3021 	if (qdf_net_buf_track_used_list_count != 0)
3022 		qdf_info("%d unfreed tracking memory still in use",
3023 			 qdf_net_buf_track_used_list_count);
3024 
3025 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3026 	kmem_cache_destroy(nbuf_tracking_cache);
3027 	qdf_net_buf_track_free_list = NULL;
3028 }
3029 
3030 /**
3031  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
3032  *
3033  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
3034  * in a hash table and when driver is unloaded it reports about leaked SKBs.
3035  * WLAN driver modules whose allocated SKBs are freed by the network stack
3036  * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
3037  * not reported as a memory leak.
3038  *
3039  * Return: none
3040  */
3041 void qdf_net_buf_debug_init(void)
3042 {
3043 	uint32_t i;
3044 
3045 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
3046 
3047 	if (is_initial_mem_debug_disabled)
3048 		return;
3049 
3050 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
3051 
3052 	qdf_nbuf_map_tracking_init();
3053 	qdf_nbuf_smmu_map_tracking_init();
3054 	qdf_nbuf_track_memory_manager_create();
3055 
3056 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3057 		gp_qdf_net_buf_track_tbl[i] = NULL;
3058 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
3059 	}
3060 }
3061 qdf_export_symbol(qdf_net_buf_debug_init);
3062 
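/*
 * A minimal usage sketch of the nbuf debug lifecycle, assuming a hypothetical
 * driver module and omitting error handling: the tracking table is created
 * once at load time, every SKB handed up to the network stack is released
 * from tracking first, and any leaks are reported at unload.
 *
 *	qdf_net_buf_debug_init();              called at module load
 *	...
 *	qdf_net_buf_debug_release_skb(nbuf);   before handing nbuf to the stack
 *	...
 *	qdf_net_buf_debug_exit();              at module unload; logs leaks
 */
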
3063 /**
3064  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
3065  *
3066  * Exit network buffer tracking debug functionality and log SKB memory leaks.
3067  * As part of exiting the functionality, free the leaked memory and
3068  * cleanup the tracking buffers.
3069  *
3070  * Return: none
3071  */
3072 void qdf_net_buf_debug_exit(void)
3073 {
3074 	uint32_t i;
3075 	uint32_t count = 0;
3076 	unsigned long irq_flag;
3077 	QDF_NBUF_TRACK *p_node;
3078 	QDF_NBUF_TRACK *p_prev;
3079 
3080 	if (is_initial_mem_debug_disabled)
3081 		return;
3082 
3083 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3084 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3085 		p_node = gp_qdf_net_buf_track_tbl[i];
3086 		while (p_node) {
3087 			p_prev = p_node;
3088 			p_node = p_node->p_next;
3089 			count++;
3090 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
3091 				 p_prev->func_name, p_prev->line_num,
3092 				 p_prev->size, p_prev->net_buf);
3093 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
3094 				 p_prev->map_func_name,
3095 				 p_prev->map_line_num,
3096 				 p_prev->unmap_func_name,
3097 				 p_prev->unmap_line_num,
3098 				 p_prev->is_nbuf_mapped);
3099 			qdf_nbuf_track_free(p_prev);
3100 		}
3101 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3102 	}
3103 
3104 	qdf_nbuf_track_memory_manager_destroy();
3105 	qdf_nbuf_map_tracking_deinit();
3106 	qdf_nbuf_smmu_map_tracking_deinit();
3107 
3108 #ifdef CONFIG_HALT_KMEMLEAK
3109 	if (count) {
3110 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
3111 		QDF_BUG(0);
3112 	}
3113 #endif
3114 }
3115 qdf_export_symbol(qdf_net_buf_debug_exit);
3116 
3117 /**
3118  * qdf_net_buf_debug_hash() - hash network buffer pointer
3119  *
3120  * Return: hash value
3121  */
3122 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
3123 {
3124 	uint32_t i;
3125 
3126 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
3127 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
3128 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
3129 
3130 	return i;
3131 }
3132 
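/*
 * qdf_net_buf_debug_hash() folds the skb pointer into a bucket index: the
 * shift by 4 discards low bits that are zero for aligned allocations, the
 * shift by 14 mixes in higher-order pointer bits, and the final mask keeps
 * the index inside the table. A minimal equivalent sketch, assuming a
 * power-of-two QDF_NET_BUF_TRACK_MAX_SIZE (the real value is defined in the
 * QDF headers):
 *
 *	uintptr_t p = (uintptr_t)net_buf;
 *	uint32_t bucket = ((uint32_t)(p >> 4) + (uint32_t)(p >> 14)) &
 *			  (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
 */
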
3133 /**
3134  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
3135  *
3136  * Return: pointer to the tracking node if the skb is found in the hash
3137  *	table, else %NULL
3138  */
3139 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
3140 {
3141 	uint32_t i;
3142 	QDF_NBUF_TRACK *p_node;
3143 
3144 	i = qdf_net_buf_debug_hash(net_buf);
3145 	p_node = gp_qdf_net_buf_track_tbl[i];
3146 
3147 	while (p_node) {
3148 		if (p_node->net_buf == net_buf)
3149 			return p_node;
3150 		p_node = p_node->p_next;
3151 	}
3152 
3153 	return NULL;
3154 }
3155 
3156 /**
3157  * qdf_net_buf_debug_add_node() - store skb in debug hash table
3158  *
3159  * Return: none
3160  */
3161 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
3162 				const char *func_name, uint32_t line_num)
3163 {
3164 	uint32_t i;
3165 	unsigned long irq_flag;
3166 	QDF_NBUF_TRACK *p_node;
3167 	QDF_NBUF_TRACK *new_node;
3168 
3169 	if (is_initial_mem_debug_disabled)
3170 		return;
3171 
3172 	new_node = qdf_nbuf_track_alloc();
3173 
3174 	i = qdf_net_buf_debug_hash(net_buf);
3175 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3176 
3177 	p_node = qdf_net_buf_debug_look_up(net_buf);
3178 
3179 	if (p_node) {
3180 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
3181 			  p_node->net_buf, p_node->func_name, p_node->line_num,
3182 			  net_buf, func_name, line_num);
3183 		qdf_nbuf_track_free(new_node);
3184 	} else {
3185 		p_node = new_node;
3186 		if (p_node) {
3187 			p_node->net_buf = net_buf;
3188 			qdf_str_lcopy(p_node->func_name, func_name,
3189 				      QDF_MEM_FUNC_NAME_SIZE);
3190 			p_node->line_num = line_num;
3191 			p_node->is_nbuf_mapped = false;
3192 			p_node->map_line_num = 0;
3193 			p_node->unmap_line_num = 0;
3194 			p_node->map_func_name[0] = '\0';
3195 			p_node->unmap_func_name[0] = '\0';
3196 			p_node->size = size;
3197 			p_node->time = qdf_get_log_timestamp();
3198 			qdf_net_buf_update_smmu_params(p_node);
3199 			qdf_mem_skb_inc(size);
3200 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
3201 			gp_qdf_net_buf_track_tbl[i] = p_node;
3202 		} else {
3203 			qdf_net_buf_track_fail_count++;
3204 			qdf_print(
3205 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
3206 				  func_name, line_num, size);
3207 		}
3208 	}
3209 
3210 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3211 }
3212 qdf_export_symbol(qdf_net_buf_debug_add_node);
3213 
3214 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
3215 				   uint32_t line_num)
3216 {
3217 	uint32_t i;
3218 	unsigned long irq_flag;
3219 	QDF_NBUF_TRACK *p_node;
3220 
3221 	if (is_initial_mem_debug_disabled)
3222 		return;
3223 
3224 	i = qdf_net_buf_debug_hash(net_buf);
3225 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3226 
3227 	p_node = qdf_net_buf_debug_look_up(net_buf);
3228 
3229 	if (p_node) {
3230 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
3231 			      QDF_MEM_FUNC_NAME_SIZE);
3232 		p_node->line_num = line_num;
3233 	}
3234 
3235 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3236 }
3237 
3238 qdf_export_symbol(qdf_net_buf_debug_update_node);
3239 
3240 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
3241 				       const char *func_name,
3242 				       uint32_t line_num)
3243 {
3244 	uint32_t i;
3245 	unsigned long irq_flag;
3246 	QDF_NBUF_TRACK *p_node;
3247 
3248 	if (is_initial_mem_debug_disabled)
3249 		return;
3250 
3251 	i = qdf_net_buf_debug_hash(net_buf);
3252 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3253 
3254 	p_node = qdf_net_buf_debug_look_up(net_buf);
3255 
3256 	if (p_node) {
3257 		qdf_str_lcopy(p_node->map_func_name, func_name,
3258 			      QDF_MEM_FUNC_NAME_SIZE);
3259 		p_node->map_line_num = line_num;
3260 		p_node->is_nbuf_mapped = true;
3261 	}
3262 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3263 }
3264 
3265 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
3266 void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
3267 					    unsigned long iova,
3268 					    unsigned long pa,
3269 					    const char *func,
3270 					    uint32_t line)
3271 {
3272 	uint32_t i;
3273 	unsigned long irq_flag;
3274 	QDF_NBUF_TRACK *p_node;
3275 
3276 	if (is_initial_mem_debug_disabled)
3277 		return;
3278 
3279 	i = qdf_net_buf_debug_hash(nbuf);
3280 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3281 
3282 	p_node = qdf_net_buf_debug_look_up(nbuf);
3283 
3284 	if (p_node) {
3285 		qdf_str_lcopy(p_node->smmu_map_func_name, func,
3286 			      QDF_MEM_FUNC_NAME_SIZE);
3287 		p_node->smmu_map_line_num = line;
3288 		p_node->is_nbuf_smmu_mapped = true;
3289 		p_node->smmu_map_iova_addr = iova;
3290 		p_node->smmu_map_pa_addr = pa;
3291 	}
3292 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3293 }
3294 
3295 void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
3296 					      unsigned long iova,
3297 					      unsigned long pa,
3298 					      const char *func,
3299 					      uint32_t line)
3300 {
3301 	uint32_t i;
3302 	unsigned long irq_flag;
3303 	QDF_NBUF_TRACK *p_node;
3304 
3305 	if (is_initial_mem_debug_disabled)
3306 		return;
3307 
3308 	i = qdf_net_buf_debug_hash(nbuf);
3309 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3310 
3311 	p_node = qdf_net_buf_debug_look_up(nbuf);
3312 
3313 	if (p_node) {
3314 		qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
3315 			      QDF_MEM_FUNC_NAME_SIZE);
3316 		p_node->smmu_unmap_line_num = line;
3317 		p_node->is_nbuf_smmu_mapped = false;
3318 		p_node->smmu_unmap_iova_addr = iova;
3319 		p_node->smmu_unmap_pa_addr = pa;
3320 	}
3321 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3322 }
3323 #endif
3324 
3325 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3326 					 const char *func_name,
3327 					 uint32_t line_num)
3328 {
3329 	uint32_t i;
3330 	unsigned long irq_flag;
3331 	QDF_NBUF_TRACK *p_node;
3332 
3333 	if (is_initial_mem_debug_disabled)
3334 		return;
3335 
3336 	i = qdf_net_buf_debug_hash(net_buf);
3337 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3338 
3339 	p_node = qdf_net_buf_debug_look_up(net_buf);
3340 
3341 	if (p_node) {
3342 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
3343 			      QDF_MEM_FUNC_NAME_SIZE);
3344 		p_node->unmap_line_num = line_num;
3345 		p_node->is_nbuf_mapped = false;
3346 	}
3347 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3348 }
3349 
3350 /**
3351  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
3352  *
3353  * Return: none
3354  */
3355 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3356 {
3357 	uint32_t i;
3358 	QDF_NBUF_TRACK *p_head;
3359 	QDF_NBUF_TRACK *p_node = NULL;
3360 	unsigned long irq_flag;
3361 	QDF_NBUF_TRACK *p_prev;
3362 
3363 	if (is_initial_mem_debug_disabled)
3364 		return;
3365 
3366 	i = qdf_net_buf_debug_hash(net_buf);
3367 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3368 
3369 	p_head = gp_qdf_net_buf_track_tbl[i];
3370 
3371 	/* Unallocated SKB */
3372 	if (!p_head)
3373 		goto done;
3374 
3375 	p_node = p_head;
3376 	/* Found at head of the table */
3377 	if (p_head->net_buf == net_buf) {
3378 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3379 		goto done;
3380 	}
3381 
3382 	/* Search in collision list */
3383 	while (p_node) {
3384 		p_prev = p_node;
3385 		p_node = p_node->p_next;
3386 		if ((p_node) && (p_node->net_buf == net_buf)) {
3387 			p_prev->p_next = p_node->p_next;
3388 			break;
3389 		}
3390 	}
3391 
3392 done:
3393 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3394 
3395 	if (p_node) {
3396 		qdf_mem_skb_dec(p_node->size);
3397 		qdf_nbuf_track_free(p_node);
3398 	} else {
3399 		if (qdf_net_buf_track_fail_count) {
3400 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3401 				  net_buf, qdf_net_buf_track_fail_count);
3402 		} else
3403 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3404 					   net_buf);
3405 	}
3406 }
3407 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3408 
3409 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3410 				   const char *func_name, uint32_t line_num)
3411 {
3412 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3413 
3414 	if (is_initial_mem_debug_disabled)
3415 		return;
3416 
3417 	while (ext_list) {
3418 		/*
3419 		 * Take care to add if it is Jumbo packet connected using
3420 		 * frag_list
3421 		 */
3422 		qdf_nbuf_t next;
3423 
3424 		next = qdf_nbuf_queue_next(ext_list);
3425 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3426 		ext_list = next;
3427 	}
3428 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3429 }
3430 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3431 
3432 /**
3433  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
3434  * @net_buf: Network buf holding head segment (single)
3435  *
3436  * WLAN driver modules whose allocated SKBs are freed by the network stack
3437  * are supposed to call this API before returning the SKB to the network
3438  * stack so that the SKB is not reported as a memory leak.
3439  *
3440  * Return: none
3441  */
3442 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3443 {
3444 	qdf_nbuf_t ext_list;
3445 
3446 	if (is_initial_mem_debug_disabled)
3447 		return;
3448 
3449 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3450 	while (ext_list) {
3451 		/*
3452 		 * Take care to free if it is Jumbo packet connected using
3453 		 * frag_list
3454 		 */
3455 		qdf_nbuf_t next;
3456 
3457 		next = qdf_nbuf_queue_next(ext_list);
3458 
3459 		if (qdf_nbuf_get_users(ext_list) > 1) {
3460 			ext_list = next;
3461 			continue;
3462 		}
3463 
3464 		qdf_net_buf_debug_delete_node(ext_list);
3465 		ext_list = next;
3466 	}
3467 
3468 	if (qdf_nbuf_get_users(net_buf) > 1)
3469 		return;
3470 
3471 	qdf_net_buf_debug_delete_node(net_buf);
3472 }
3473 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3474 
3475 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3476 				int reserve, int align, int prio,
3477 				const char *func, uint32_t line)
3478 {
3479 	qdf_nbuf_t nbuf;
3480 
3481 	if (is_initial_mem_debug_disabled)
3482 		return __qdf_nbuf_alloc(osdev, size,
3483 					reserve, align,
3484 					prio, func, line);
3485 
3486 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3487 
3488 	/* Store SKB in internal QDF tracking table */
3489 	if (qdf_likely(nbuf)) {
3490 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3491 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3492 	} else {
3493 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3494 	}
3495 
3496 	return nbuf;
3497 }
3498 qdf_export_symbol(qdf_nbuf_alloc_debug);
3499 
3500 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3501 					    const char *func, uint32_t line)
3502 {
3503 	qdf_nbuf_t nbuf;
3504 
3505 	if (is_initial_mem_debug_disabled)
3506 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3507 						    line);
3508 
3509 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3510 
3511 	/* Store SKB in internal QDF tracking table */
3512 	if (qdf_likely(nbuf)) {
3513 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3514 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3515 	} else {
3516 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3517 	}
3518 
3519 	return nbuf;
3520 }
3521 
3522 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3523 
3524 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3525 {
3526 	qdf_nbuf_t ext_list;
3527 	qdf_frag_t p_frag;
3528 	uint32_t num_nr_frags;
3529 	uint32_t idx = 0;
3530 
3531 	if (qdf_unlikely(!nbuf))
3532 		return;
3533 
3534 	if (is_initial_mem_debug_disabled)
3535 		goto free_buf;
3536 
3537 	if (qdf_nbuf_get_users(nbuf) > 1)
3538 		goto free_buf;
3539 
3540 	/* Remove SKB from internal QDF tracking table */
3541 	qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
3542 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3543 	qdf_net_buf_debug_delete_node(nbuf);
3544 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3545 
3546 	/* Take care to delete the debug entries for frags */
3547 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3548 
3549 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3550 
3551 	while (idx < num_nr_frags) {
3552 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3553 		if (qdf_likely(p_frag))
3554 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3555 		idx++;
3556 	}
3557 
3558 	/*
3559 	 * Take care to update the debug entries for frag_list and also
3560 	 * for the frags attached to frag_list
3561 	 */
3562 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3563 	while (ext_list) {
3564 		if (qdf_nbuf_get_users(ext_list) == 1) {
3565 			qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
3566 							      line);
3567 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3568 			idx = 0;
3569 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3570 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3571 			while (idx < num_nr_frags) {
3572 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3573 				if (qdf_likely(p_frag))
3574 					qdf_frag_debug_refcount_dec(p_frag,
3575 								    func, line);
3576 				idx++;
3577 			}
3578 			qdf_net_buf_debug_delete_node(ext_list);
3579 		}
3580 
3581 		ext_list = qdf_nbuf_queue_next(ext_list);
3582 	}
3583 
3584 free_buf:
3585 	__qdf_nbuf_free(nbuf);
3586 }
3587 qdf_export_symbol(qdf_nbuf_free_debug);
3588 
3589 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3590 					const char *func, uint32_t line)
3591 {
3592 	struct sk_buff *skb;
3593 	int flags = GFP_KERNEL;
3594 
3595 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3596 		flags = GFP_ATOMIC;
3597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3598 		/*
3599 		 * Observed that kcompactd burns CPU trying to compose order-3 pages.
3600 		 * __netdev_alloc_skb has a 4k page fallback option in case the
3601 		 * high-order page allocation fails, so we don't need to push hard
3602 		 * here. Let kcompactd rest in peace.
3603 		 */
3604 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3605 #endif
3606 	}
3607 
3608 	skb = __netdev_alloc_skb(NULL, size, flags);
3609 
3610 
3611 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3612 		if (qdf_likely(skb))
3613 			qdf_nbuf_count_inc(skb);
3614 	} else {
3615 		if (qdf_likely(skb)) {
3616 			qdf_nbuf_count_inc(skb);
3617 			qdf_net_buf_debug_add_node(skb, size, func, line);
3618 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
3619 		} else {
3620 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
3621 		}
3622 	}
3623 
3624 
3625 	return skb;
3626 }
3627 
3628 qdf_export_symbol(__qdf_nbuf_alloc_simple);
3629 
3630 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
3631 				uint32_t line)
3632 {
3633 	if (qdf_likely(nbuf)) {
3634 		if (is_initial_mem_debug_disabled) {
3635 			dev_kfree_skb_any(nbuf);
3636 		} else {
3637 			qdf_nbuf_free_debug(nbuf, func, line);
3638 		}
3639 	}
3640 }
3641 
3642 qdf_export_symbol(qdf_nbuf_free_debug_simple);
3643 
3644 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3645 {
3646 	uint32_t num_nr_frags;
3647 	uint32_t idx = 0;
3648 	qdf_nbuf_t ext_list;
3649 	qdf_frag_t p_frag;
3650 
3651 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3652 
3653 	if (is_initial_mem_debug_disabled)
3654 		return cloned_buf;
3655 
3656 	if (qdf_unlikely(!cloned_buf))
3657 		return NULL;
3658 
3659 	/* Take care to update the debug entries for frags */
3660 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3661 
3662 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3663 
3664 	while (idx < num_nr_frags) {
3665 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3666 		if (qdf_likely(p_frag))
3667 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3668 		idx++;
3669 	}
3670 
3671 	/* Take care to update debug entries for frags attached to frag_list */
3672 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3673 	while (ext_list) {
3674 		idx = 0;
3675 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3676 
3677 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3678 
3679 		while (idx < num_nr_frags) {
3680 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3681 			if (qdf_likely(p_frag))
3682 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3683 			idx++;
3684 		}
3685 		ext_list = qdf_nbuf_queue_next(ext_list);
3686 	}
3687 
3688 	/* Store SKB in internal QDF tracking table */
3689 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3690 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3691 
3692 	return cloned_buf;
3693 }
3694 qdf_export_symbol(qdf_nbuf_clone_debug);
3695 
3696 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3697 {
3698 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3699 
3700 	if (is_initial_mem_debug_disabled)
3701 		return copied_buf;
3702 
3703 	if (qdf_unlikely(!copied_buf))
3704 		return NULL;
3705 
3706 	/* Store SKB in internal QDF tracking table */
3707 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3708 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3709 
3710 	return copied_buf;
3711 }
3712 qdf_export_symbol(qdf_nbuf_copy_debug);
3713 
3714 qdf_nbuf_t
3715 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3716 			   const char *func, uint32_t line)
3717 {
3718 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3719 
3720 	if (qdf_unlikely(!copied_buf))
3721 		return NULL;
3722 
3723 	if (is_initial_mem_debug_disabled)
3724 		return copied_buf;
3725 
3726 	/* Store SKB in internal QDF tracking table */
3727 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3728 	qdf_nbuf_history_add(copied_buf, func, line,
3729 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3730 
3731 	return copied_buf;
3732 }
3733 
3734 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3735 
3736 qdf_nbuf_t
3737 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3738 		       uint32_t line_num)
3739 {
3740 	qdf_nbuf_t unshared_buf;
3741 	qdf_frag_t p_frag;
3742 	uint32_t num_nr_frags;
3743 	uint32_t idx = 0;
3744 	qdf_nbuf_t ext_list, next;
3745 
3746 	if (is_initial_mem_debug_disabled)
3747 		return __qdf_nbuf_unshare(buf);
3748 
3749 	/* Not a shared buffer, nothing to do */
3750 	if (!qdf_nbuf_is_cloned(buf))
3751 		return buf;
3752 
3753 	if (qdf_nbuf_get_users(buf) > 1)
3754 		goto unshare_buf;
3755 
3756 	/* Take care to delete the debug entries for frags */
3757 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3758 
3759 	while (idx < num_nr_frags) {
3760 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3761 		if (qdf_likely(p_frag))
3762 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3763 						    line_num);
3764 		idx++;
3765 	}
3766 
3767 	qdf_net_buf_debug_delete_node(buf);
3768 
3769 	 /* Take care of jumbo packet connected using frag_list and frags */
3770 	ext_list = qdf_nbuf_get_ext_list(buf);
3771 	while (ext_list) {
3772 		idx = 0;
3773 		next = qdf_nbuf_queue_next(ext_list);
3774 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3775 
3776 		if (qdf_nbuf_get_users(ext_list) > 1) {
3777 			ext_list = next;
3778 			continue;
3779 		}
3780 
3781 		while (idx < num_nr_frags) {
3782 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3783 			if (qdf_likely(p_frag))
3784 				qdf_frag_debug_refcount_dec(p_frag, func_name,
3785 							    line_num);
3786 			idx++;
3787 		}
3788 
3789 		qdf_net_buf_debug_delete_node(ext_list);
3790 		ext_list = next;
3791 	}
3792 
3793 unshare_buf:
3794 	unshared_buf = __qdf_nbuf_unshare(buf);
3795 
3796 	if (qdf_likely(unshared_buf))
3797 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3798 					   line_num);
3799 
3800 	return unshared_buf;
3801 }
3802 
3803 qdf_export_symbol(qdf_nbuf_unshare_debug);
3804 
3805 #endif /* NBUF_MEMORY_DEBUG */
3806 
3807 #if defined(FEATURE_TSO)
3808 
3809 /**
3810  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3811  *
3812  * @ethproto: ethernet type of the msdu
3813  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3814  * @l2_len: L2 length for the msdu
3815  * @eit_hdr: pointer to EIT header
3816  * @eit_hdr_len: EIT header length for the msdu
3817  * @eit_hdr_dma_map_addr: dma addr for EIT header
3818  * @tcphdr: pointer to tcp header
3819  * @ipv4_csum_en: ipv4 checksum enable
3820  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3821  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3822  * @ip_id: IP id
3823  * @tcp_seq_num: TCP sequence number
3824  *
3825  * This structure holds the TSO common info that is common
3826  * across all the TCP segments of the jumbo packet.
3827  */
3828 struct qdf_tso_cmn_seg_info_t {
3829 	uint16_t ethproto;
3830 	uint16_t ip_tcp_hdr_len;
3831 	uint16_t l2_len;
3832 	uint8_t *eit_hdr;
3833 	uint32_t eit_hdr_len;
3834 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3835 	struct tcphdr *tcphdr;
3836 	uint16_t ipv4_csum_en;
3837 	uint16_t tcp_ipv4_csum_en;
3838 	uint16_t tcp_ipv6_csum_en;
3839 	uint16_t ip_id;
3840 	uint32_t tcp_seq_num;
3841 };
3842 
3843 /**
3844  * qdf_nbuf_adj_tso_frag() - adjust the TSO fragment buffer for 8-byte alignment
3845  *
3846  * @skb: network buffer
3847  *
3848  * Return: byte offset applied to achieve 8-byte alignment (0 if none).
3849  */
3850 #ifdef FIX_TXDMA_LIMITATION
3851 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3852 {
3853 	uint32_t eit_hdr_len;
3854 	uint8_t *eit_hdr;
3855 	uint8_t byte_8_align_offset;
3856 
3857 	eit_hdr = skb->data;
3858 	eit_hdr_len = (skb_transport_header(skb)
3859 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3860 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3861 	if (qdf_unlikely(byte_8_align_offset)) {
3862 		TSO_DEBUG("%pK,Len %d %d",
3863 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3864 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3865 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3866 				  __LINE__, skb->head, skb->data,
3867 				 byte_8_align_offset);
3868 			return 0;
3869 		}
3870 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3871 		qdf_mem_move(skb->data,
3872 			     skb->data + byte_8_align_offset,
3873 			     eit_hdr_len);
3874 		skb->len -= byte_8_align_offset;
3875 		skb->mac_header -= byte_8_align_offset;
3876 		skb->network_header -= byte_8_align_offset;
3877 		skb->transport_header -= byte_8_align_offset;
3878 	}
3879 	return byte_8_align_offset;
3880 }
3881 #else
3882 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3883 {
3884 	return 0;
3885 }
3886 #endif
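/*
 * Illustrative sketch (not driver code) of how the alignment offset above is
 * derived; the addresses and lengths are hypothetical:
 *
 *	uint8_t *eit_hdr = (uint8_t *)0x1000;	/- skb->data -/
 *	uint32_t eit_hdr_len = 54;		/- 14B eth + 20B IP + 20B TCP -/
 *	uint8_t off = ((unsigned long)eit_hdr + eit_hdr_len) & 0x7;	/- = 6 -/
 *
 * Pushing the head by 'off' bytes and moving the EIT header back by the same
 * amount makes the end of the header (i.e. the start of the TCP payload)
 * 8-byte aligned, which appears to be what the TXDMA limitation workaround
 * requires.
 */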
3887 
3888 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
3889 void qdf_record_nbuf_nbytes(
3890 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
3891 {
3892 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
3893 }
3894 
3895 qdf_export_symbol(qdf_record_nbuf_nbytes);
3896 
3897 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
3898 
3899 /**
3900  * qdf_nbuf_tso_map_frag() - DMA map a TSO fragment
3901  * @osdev: qdf device handle
3902  * @tso_frag_vaddr: virtual address of the tso fragment
3903  * @nbytes: number of bytes
3904  * @dir: direction
3905  *
3906  * Map the TSO fragment and, for MCL, record the amount of memory mapped
3907  *
3908  * Return: DMA address of the mapped TSO fragment on success,
3909  * 0 in case of DMA mapping failure
3910  */
3911 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
3912 	qdf_device_t osdev, void *tso_frag_vaddr,
3913 	uint32_t nbytes, qdf_dma_dir_t dir)
3914 {
3915 	qdf_dma_addr_t tso_frag_paddr = 0;
3916 
3917 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
3918 					nbytes, __qdf_dma_dir_to_os(dir));
3919 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
3920 		qdf_err("DMA mapping error!");
3921 		qdf_assert_always(0);
3922 		return 0;
3923 	}
3924 	qdf_record_nbuf_nbytes(nbytes, dir, true);
3925 	return tso_frag_paddr;
3926 }
3927 
3928 /**
3929  * qdf_nbuf_tso_unmap_frag() - DMA unmap a TSO fragment
3930  * @osdev: qdf device handle
3931  * @tso_frag_paddr: DMA addr of tso fragment
3932  * @nbytes: number of bytes
3933  * @dir: direction
3934  *
3935  * Unmap the TSO fragment and, for MCL, record the amount of memory unmapped
3936  *
3937  * Return: None
3938  */
3939 static inline void qdf_nbuf_tso_unmap_frag(
3940 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
3941 	uint32_t nbytes, qdf_dma_dir_t dir)
3942 {
3943 	qdf_record_nbuf_nbytes(nbytes, dir, false);
3944 	dma_unmap_single(osdev->dev, tso_frag_paddr,
3945 			 nbytes, __qdf_dma_dir_to_os(dir));
3946 }
3947 
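/*
 * Usage sketch for the two helpers above (illustrative only; osdev, vaddr
 * and len stand for hypothetical locals of a caller in this file):
 *
 *	qdf_dma_addr_t paddr;
 *
 *	paddr = qdf_nbuf_tso_map_frag(osdev, vaddr, len, QDF_DMA_TO_DEVICE);
 *	if (qdf_unlikely(!paddr))
 *		return 0;
 *	...program paddr/len into the TSO fragment descriptor...
 *	qdf_nbuf_tso_unmap_frag(osdev, paddr, len, QDF_DMA_TO_DEVICE);
 *
 * Map and unmap must use the same nbytes and direction so that the byte
 * accounting done through qdf_record_nbuf_nbytes() stays balanced.
 */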
3948 /**
3949  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3950  * information
3951  * @osdev: qdf device handle
3952  * @skb: skb buffer
3953  * @tso_info: Parameters common to all segments
3954  *
3955  * Get the TSO information that is common across all the TCP
3956  * segments of the jumbo packet
3957  *
3958  * Return: 0 - success, 1 - failure
3959  */
3960 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3961 			struct sk_buff *skb,
3962 			struct qdf_tso_cmn_seg_info_t *tso_info)
3963 {
3964 	/* Get ethernet type and ethernet header length */
3965 	tso_info->ethproto = vlan_get_protocol(skb);
3966 
3967 	/* Determine whether this is an IPv4 or IPv6 packet */
3968 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3969 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3970 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3971 
3972 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3973 		tso_info->ipv4_csum_en = 1;
3974 		tso_info->tcp_ipv4_csum_en = 1;
3975 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3976 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3977 				ipv4_hdr->protocol);
3978 			return 1;
3979 		}
3980 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3981 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3982 		tso_info->tcp_ipv6_csum_en = 1;
3983 	} else {
3984 		qdf_err("TSO: ethertype 0x%x is not supported!",
3985 			tso_info->ethproto);
3986 		return 1;
3987 	}
3988 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3989 	tso_info->tcphdr = tcp_hdr(skb);
3990 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3991 	/* get pointer to the ethernet + IP + TCP header and their length */
3992 	tso_info->eit_hdr = skb->data;
3993 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3994 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3995 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
3996 						osdev, tso_info->eit_hdr,
3997 						tso_info->eit_hdr_len,
3998 						QDF_DMA_TO_DEVICE);
3999 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
4000 		return 1;
4001 
4002 	if (tso_info->ethproto == htons(ETH_P_IP)) {
4003 		/* include IPv4 header length for IPv4 (total length) */
4004 		tso_info->ip_tcp_hdr_len =
4005 			tso_info->eit_hdr_len - tso_info->l2_len;
4006 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
4007 		/* exclude IPv6 header length for IPv6 (payload length) */
4008 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
4009 	}
4010 	/*
4011 	 * The length of the payload (application layer data) is added to
4012 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
4013 	 * descriptor.
4014 	 */
4015 
4016 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
4017 		tso_info->tcp_seq_num,
4018 		tso_info->eit_hdr_len,
4019 		tso_info->l2_len,
4020 		skb->len);
4021 	return 0;
4022 }
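/*
 * Worked example for the header arithmetic above (illustrative values): an
 * untagged IPv4/TCP frame with a 20-byte IP header and a 32-byte TCP header
 * (options included) gives l2_len = 14, eit_hdr_len = 14 + 20 + 32 = 66 and
 * ip_tcp_hdr_len = 66 - 14 = 52 (IPv4 total-length accounting). For the same
 * headers over IPv6, ip_tcp_hdr_len = tcp_hdrlen(skb) = 32, because the IPv6
 * payload length excludes the IPv6 header itself.
 */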
4023 
4024 
4025 /**
4026  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
4027  *
4028  * @curr_seg: Segment whose contents are initialized
4029  * @tso_cmn_info: Parameters common to all segments
4030  *
4031  * Return: None
4032  */
4033 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
4034 				struct qdf_tso_seg_elem_t *curr_seg,
4035 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
4036 {
4037 	/* Initialize the flags to 0 */
4038 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
4039 
4040 	/*
4041 	 * The following fields remain the same across all segments of
4042 	 * a jumbo packet
4043 	 */
4044 	curr_seg->seg.tso_flags.tso_enable = 1;
4045 	curr_seg->seg.tso_flags.ipv4_checksum_en =
4046 		tso_cmn_info->ipv4_csum_en;
4047 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
4048 		tso_cmn_info->tcp_ipv6_csum_en;
4049 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
4050 		tso_cmn_info->tcp_ipv4_csum_en;
4051 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
4052 
4053 	/* The following fields change for the segments */
4054 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
4055 	tso_cmn_info->ip_id++;
4056 
4057 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
4058 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
4059 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
4060 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
4061 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
4062 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
4063 
4064 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
4065 
4066 	/*
4067 	 * First fragment for each segment always contains the ethernet,
4068 	 * IP and TCP header
4069 	 */
4070 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
4071 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
4072 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
4073 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
4074 
4075 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
4076 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
4077 		   tso_cmn_info->eit_hdr_len,
4078 		   curr_seg->seg.tso_flags.tcp_seq_num,
4079 		   curr_seg->seg.total_len);
4080 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
4081 }
4082 
4083 /**
4084  * __qdf_nbuf_get_tso_info() - divide a TSO nbuf into segments
4085  * @osdev: qdf device handle
4086  * @skb: network buffer to be segmented
4087  * @tso_info: This is the output. The information about the
4088  *           TSO segments will be populated within this.
4089  *
4090  * This function fragments a TCP jumbo packet into smaller
4091  * segments to be transmitted by the driver. It chains the TSO
4092  * segments created into a list.
4093  *
4094  * Return: number of TSO segments
4095  */
4096 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
4097 		struct qdf_tso_info_t *tso_info)
4098 {
4099 	/* common across all segments */
4100 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
4101 	/* segment specific */
4102 	void *tso_frag_vaddr;
4103 	qdf_dma_addr_t tso_frag_paddr = 0;
4104 	uint32_t num_seg = 0;
4105 	struct qdf_tso_seg_elem_t *curr_seg;
4106 	struct qdf_tso_num_seg_elem_t *total_num_seg;
4107 	skb_frag_t *frag = NULL;
4108 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
4109 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
4110 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
4111 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4112 	int j = 0; /* skb fragment index */
4113 	uint8_t byte_8_align_offset;
4114 
4115 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
4116 	total_num_seg = tso_info->tso_num_seg_list;
4117 	curr_seg = tso_info->tso_seg_list;
4118 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
4119 
4120 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
4121 
4122 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
4123 						skb, &tso_cmn_info))) {
4124 		qdf_warn("TSO: error getting common segment info");
4125 		return 0;
4126 	}
4127 
4128 	/* length of the first chunk of data in the skb */
4129 	skb_frag_len = skb_headlen(skb);
4130 
4131 	/* the 0th tso segment's 0th fragment always contains the EIT header */
4132 	/* update the remaining skb fragment length and TSO segment length */
4133 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
4134 	skb_proc -= tso_cmn_info.eit_hdr_len;
4135 
4136 	/* get the address to the next tso fragment */
4137 	tso_frag_vaddr = skb->data +
4138 			 tso_cmn_info.eit_hdr_len +
4139 			 byte_8_align_offset;
4140 	/* get the length of the next tso fragment */
4141 	tso_frag_len = min(skb_frag_len, tso_seg_size);
4142 
4143 	if (tso_frag_len != 0) {
4144 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
4145 					osdev, tso_frag_vaddr, tso_frag_len,
4146 					QDF_DMA_TO_DEVICE);
4147 		if (qdf_unlikely(!tso_frag_paddr))
4148 			return 0;
4149 	}
4150 
4151 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
4152 		__LINE__, skb_frag_len, tso_frag_len);
4153 	num_seg = tso_info->num_segs;
4154 	tso_info->num_segs = 0;
4155 	tso_info->is_tso = 1;
4156 
4157 	while (num_seg && curr_seg) {
4158 		int i = 1; /* tso fragment index */
4159 		uint8_t more_tso_frags = 1;
4160 
4161 		curr_seg->seg.num_frags = 0;
4162 		tso_info->num_segs++;
4163 		total_num_seg->num_seg.tso_cmn_num_seg++;
4164 
4165 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
4166 						 &tso_cmn_info);
4167 
4168 		/* If TCP PSH flag is set, set it in the last or only segment */
4169 		if (num_seg == 1)
4170 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
4171 
4172 		if (unlikely(skb_proc == 0))
4173 			return tso_info->num_segs;
4174 
4175 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
4176 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
4177 		/* frag len is added to ip_len in while loop below*/
4178 
4179 		curr_seg->seg.num_frags++;
4180 
4181 		while (more_tso_frags) {
4182 			if (tso_frag_len != 0) {
4183 				curr_seg->seg.tso_frags[i].vaddr =
4184 					tso_frag_vaddr;
4185 				curr_seg->seg.tso_frags[i].length =
4186 					tso_frag_len;
4187 				curr_seg->seg.total_len += tso_frag_len;
4188 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
4189 				curr_seg->seg.num_frags++;
4190 				skb_proc = skb_proc - tso_frag_len;
4191 
4192 				/* increment the TCP sequence number */
4193 
4194 				tso_cmn_info.tcp_seq_num += tso_frag_len;
4195 				curr_seg->seg.tso_frags[i].paddr =
4196 					tso_frag_paddr;
4197 
4198 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
4199 			}
4200 
4201 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
4202 					__func__, __LINE__,
4203 					i,
4204 					tso_frag_len,
4205 					curr_seg->seg.total_len,
4206 					curr_seg->seg.tso_frags[i].vaddr);
4207 
4208 			/* if there is no more data left in the skb */
4209 			if (!skb_proc)
4210 				return tso_info->num_segs;
4211 
4212 			/* get the next payload fragment information */
4213 			/* check if there are more fragments in this segment */
4214 			if (tso_frag_len < tso_seg_size) {
4215 				more_tso_frags = 1;
4216 				if (tso_frag_len != 0) {
4217 					tso_seg_size = tso_seg_size -
4218 						tso_frag_len;
4219 					i++;
4220 					if (curr_seg->seg.num_frags ==
4221 								FRAG_NUM_MAX) {
4222 						more_tso_frags = 0;
4223 						/*
4224 						 * reset i and the tso
4225 						 * payload size
4226 						 */
4227 						i = 1;
4228 						tso_seg_size =
4229 							skb_shinfo(skb)->
4230 								gso_size;
4231 					}
4232 				}
4233 			} else {
4234 				more_tso_frags = 0;
4235 				/* reset i and the tso payload size */
4236 				i = 1;
4237 				tso_seg_size = skb_shinfo(skb)->gso_size;
4238 			}
4239 
4240 			/* if the next fragment is contiguous */
4241 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
4242 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4243 				skb_frag_len = skb_frag_len - tso_frag_len;
4244 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4245 
4246 			} else { /* the next fragment is not contiguous */
4247 				if (skb_shinfo(skb)->nr_frags == 0) {
4248 					qdf_info("TSO: nr_frags == 0!");
4249 					qdf_assert(0);
4250 					return 0;
4251 				}
4252 				if (j >= skb_shinfo(skb)->nr_frags) {
4253 					qdf_info("TSO: nr_frags %d j %d",
4254 						 skb_shinfo(skb)->nr_frags, j);
4255 					qdf_assert(0);
4256 					return 0;
4257 				}
4258 				frag = &skb_shinfo(skb)->frags[j];
4259 				skb_frag_len = skb_frag_size(frag);
4260 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4261 				tso_frag_vaddr = skb_frag_address_safe(frag);
4262 				j++;
4263 			}
4264 
4265 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4266 				__func__, __LINE__, skb_frag_len, tso_frag_len,
4267 				tso_seg_size);
4268 
4269 			if (!(tso_frag_vaddr)) {
4270 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
4271 						__func__);
4272 				return 0;
4273 			}
4274 
4275 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
4276 						osdev, tso_frag_vaddr,
4277 						tso_frag_len,
4278 						QDF_DMA_TO_DEVICE);
4279 			if (qdf_unlikely(!tso_frag_paddr))
4280 				return 0;
4281 		}
4282 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4283 				curr_seg->seg.tso_flags.tcp_seq_num);
4284 		num_seg--;
4285 		/* if TCP FIN flag was set, set it in the last segment */
4286 		if (!num_seg)
4287 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4288 
4289 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4290 		curr_seg = curr_seg->next;
4291 	}
4292 	return tso_info->num_segs;
4293 }
4294 qdf_export_symbol(__qdf_nbuf_get_tso_info);
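/*
 * Calling-sequence sketch (illustrative, not the actual datapath code; how
 * the segment element lists are allocated is driver specific):
 *
 *	struct qdf_tso_info_t info = {0};
 *
 *	info.tso_seg_list = <pre-allocated chain of qdf_tso_seg_elem_t>;
 *	info.tso_num_seg_list = <pre-allocated qdf_tso_num_seg_elem_t>;
 *	info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &info))
 *		goto unmap_and_fail;
 *
 * On failure after a partial run, every segment element that was already
 * filled must be released with __qdf_nbuf_unmap_tso_segment() so the DMA
 * mappings taken here are dropped.
 */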
4295 
4296 /**
4297  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
4298  *
4299  * @osdev: qdf device handle
4300  * @tso_seg: TSO segment element to be unmapped
4301  * @is_last_seg: whether this is last tso seg or not
4302  *
4303  * Return: none
4304  */
4305 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
4306 			  struct qdf_tso_seg_elem_t *tso_seg,
4307 			  bool is_last_seg)
4308 {
4309 	uint32_t num_frags = 0;
4310 
4311 	if (tso_seg->seg.num_frags > 0)
4312 		num_frags = tso_seg->seg.num_frags - 1;
4313 
4314 	/*Num of frags in a tso seg cannot be less than 2 */
4315 	if (num_frags < 1) {
4316 		/*
4317 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
4318 		 * this may happen when qdf_nbuf_get_tso_info failed,
4319 		 * do dma unmap for the 0th frag in this seg.
4320 		 */
4321 		if (is_last_seg && tso_seg->seg.num_frags == 1)
4322 			goto last_seg_free_first_frag;
4323 
4324 		qdf_assert(0);
4325 		qdf_err("ERROR: num of frags in a tso segment is %d",
4326 			(num_frags + 1));
4327 		return;
4328 	}
4329 
4330 	while (num_frags) {
4331 		/*Do dma unmap the tso seg except the 0th frag */
4332 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
4333 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4334 				num_frags);
4335 			qdf_assert(0);
4336 			return;
4337 		}
4338 		qdf_nbuf_tso_unmap_frag(
4339 			osdev,
4340 			tso_seg->seg.tso_frags[num_frags].paddr,
4341 			tso_seg->seg.tso_frags[num_frags].length,
4342 			QDF_DMA_TO_DEVICE);
4343 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
4344 		num_frags--;
4345 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4346 	}
4347 
4348 last_seg_free_first_frag:
4349 	if (is_last_seg) {
4350 		/*Do dma unmap for the tso seg 0th frag */
4351 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
4352 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4353 			qdf_assert(0);
4354 			return;
4355 		}
4356 		qdf_nbuf_tso_unmap_frag(osdev,
4357 					tso_seg->seg.tso_frags[0].paddr,
4358 					tso_seg->seg.tso_frags[0].length,
4359 					QDF_DMA_TO_DEVICE);
4360 		tso_seg->seg.tso_frags[0].paddr = 0;
4361 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4362 	}
4363 }
4364 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
4365 
4366 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4367 {
4368 	size_t packet_len;
4369 
4370 	packet_len = skb->len -
4371 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4372 		 tcp_hdrlen(skb));
4373 
4374 	return packet_len;
4375 }
4376 
4377 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
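/*
 * Example (illustrative numbers): for a 1514-byte frame carrying a 14-byte
 * Ethernet header, a 20-byte IPv4 header and a 20-byte TCP header,
 * __qdf_nbuf_get_tcp_payload_len() returns 1514 - (14 + 20 + 20) = 1460
 * bytes of TCP payload.
 */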
4378 
4379 /**
4380  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
4381  * required for a TSO nbuf
4382  * @skb: network buffer to be segmented
4383  *
4384  * This function calculates how many TSO segments the TCP jumbo packet
4385  * will be split into, accounting for the EIT header carried in the
4386  * first fragment of every segment and for the per-segment fragment
4387  * limit (FRAG_NUM_MAX). It does not allocate or fill any segment
4388  * descriptors; __qdf_nbuf_get_tso_info() does that.
4389  *
4390  * Return: number of TSO segments, 0 on failure
4391  */
4392 #ifndef BUILD_X86
4393 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4394 {
4395 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4396 	uint32_t remainder, num_segs = 0;
4397 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4398 	uint8_t frags_per_tso = 0;
4399 	uint32_t skb_frag_len = 0;
4400 	uint32_t eit_hdr_len = (skb_transport_header(skb)
4401 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4402 	skb_frag_t *frag = NULL;
4403 	int j = 0;
4404 	uint32_t temp_num_seg = 0;
4405 
4406 	/* length of the first chunk of data in the skb minus eit header*/
4407 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4408 
4409 	/* Calculate num of segs for skb's first chunk of data*/
4410 	remainder = skb_frag_len % tso_seg_size;
4411 	num_segs = skb_frag_len / tso_seg_size;
4412 	/**
4413 	 * Remainder non-zero and nr_frags zero implies end of skb data.
4414 	 * In that case, one more tso seg is required to accommodate
4415 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
4416 	 * then remaining data will be accommodated while doing the calculation
4417 	 * for nr_frags data. Hence, frags_per_tso++.
4418 	 */
4419 	if (remainder) {
4420 		if (!skb_nr_frags)
4421 			num_segs++;
4422 		else
4423 			frags_per_tso++;
4424 	}
4425 
4426 	while (skb_nr_frags) {
4427 		if (j >= skb_shinfo(skb)->nr_frags) {
4428 			qdf_info("TSO: nr_frags %d j %d",
4429 				 skb_shinfo(skb)->nr_frags, j);
4430 			qdf_assert(0);
4431 			return 0;
4432 		}
4433 		/**
4434 		 * Calculate the number of tso seg for nr_frags data:
4435 		 * Get the length of each frag in skb_frag_len, add it to the
4436 		 * remainder. Get the number of segments by dividing it by
4437 		 * tso_seg_size and calculate the new remainder.
4438 		 * Decrement the nr_frags value and keep
4439 		 * looping all the skb_fragments.
4440 		 */
4441 		frag = &skb_shinfo(skb)->frags[j];
4442 		skb_frag_len = skb_frag_size(frag);
4443 		temp_num_seg = num_segs;
4444 		remainder += skb_frag_len;
4445 		num_segs += remainder / tso_seg_size;
4446 		remainder = remainder % tso_seg_size;
4447 		skb_nr_frags--;
4448 		if (remainder) {
4449 			if (num_segs > temp_num_seg)
4450 				frags_per_tso = 0;
4451 			/**
4452 			 * increment the tso per frags whenever remainder is
4453 			 * positive. If frags_per_tso reaches the (max-1),
4454 			 * [First frags always have EIT header, therefore max-1]
4455 			 * increment the num_segs as no more data can be
4456 			 * accommodated in the curr tso seg. Reset the remainder
4457 			 * and frags per tso and keep looping.
4458 			 */
4459 			frags_per_tso++;
4460 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4461 				num_segs++;
4462 				frags_per_tso = 0;
4463 				remainder = 0;
4464 			}
4465 			/**
4466 			 * If this is the last skb frag and still remainder is
4467 			 * non-zero(frags_per_tso is not reached to the max-1)
4468 			 * then increment the num_segs to take care of the
4469 			 * remaining length.
4470 			 */
4471 			if (!skb_nr_frags && remainder) {
4472 				num_segs++;
4473 				frags_per_tso = 0;
4474 			}
4475 		} else {
4476 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4477 			frags_per_tso = 0;
4478 		}
4479 		j++;
4480 	}
4481 
4482 	return num_segs;
4483 }
4484 #elif !defined(QCA_WIFI_QCN9000)
4485 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4486 {
4487 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4488 	skb_frag_t *frag = NULL;
4489 
4490 	/*
4491 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
4492 	 * region which cannot be accessed by Target
4493 	 */
4494 	if (virt_to_phys(skb->data) < 0x50000040) {
4495 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4496 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4497 				virt_to_phys(skb->data));
4498 		goto fail;
4499 
4500 	}
4501 
4502 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4503 		frag = &skb_shinfo(skb)->frags[i];
4504 
4505 		if (!frag)
4506 			goto fail;
4507 
4508 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4509 			goto fail;
4510 	}
4511 
4512 
4513 	gso_size = skb_shinfo(skb)->gso_size;
4514 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4515 			+ tcp_hdrlen(skb));
4516 	while (tmp_len) {
4517 		num_segs++;
4518 		if (tmp_len > gso_size)
4519 			tmp_len -= gso_size;
4520 		else
4521 			break;
4522 	}
4523 
4524 	return num_segs;
4525 
4526 	/*
4527 	 * Do not free this frame, just do socket level accounting
4528 	 * so that this is not reused.
4529 	 */
4530 fail:
4531 	if (skb->sk)
4532 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4533 
4534 	return 0;
4535 }
4536 #else
4537 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4538 {
4539 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4540 	skb_frag_t *frag = NULL;
4541 
4542 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4543 		frag = &skb_shinfo(skb)->frags[i];
4544 
4545 		if (!frag)
4546 			goto fail;
4547 	}
4548 
4549 	gso_size = skb_shinfo(skb)->gso_size;
4550 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4551 			+ tcp_hdrlen(skb));
4552 	while (tmp_len) {
4553 		num_segs++;
4554 		if (tmp_len > gso_size)
4555 			tmp_len -= gso_size;
4556 		else
4557 			break;
4558 	}
4559 
4560 	return num_segs;
4561 
4562 	/*
4563 	 * Do not free this frame, just do socket level accounting
4564 	 * so that this is not reused.
4565 	 */
4566 fail:
4567 	if (skb->sk)
4568 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4569 
4570 	return 0;
4571 }
4572 #endif
4573 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
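/*
 * Example (illustrative numbers, non-BUILD_X86 path): an skb with
 * skb_headlen() = 2974 (54-byte EIT header plus 2920 bytes of payload),
 * nr_frags = 0 and gso_size = 1460 gives skb_frag_len = 2920, remainder = 0
 * and num_segs = 2920 / 1460 = 2 TSO segments.
 */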
4574 
4575 #endif /* FEATURE_TSO */
4576 
4577 /**
4578  * __qdf_dmaaddr_to_32s() - return the high and low 32 bits of a DMA address
4579  * @dmaaddr: DMA address
4580  * @lo: pointer filled with the low 32 bits of @dmaaddr
4581  * @hi: pointer filled with the high 32 bits of @dmaaddr
4582  * Return: N/A
4583  */
4584 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4585 			  uint32_t *lo, uint32_t *hi)
4586 {
4587 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4588 		*lo = lower_32_bits(dmaaddr);
4589 		*hi = upper_32_bits(dmaaddr);
4590 	} else {
4591 		*lo = dmaaddr;
4592 		*hi = 0;
4593 	}
4594 }
4595 
4596 qdf_export_symbol(__qdf_dmaaddr_to_32s);
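/*
 * Example (illustrative): with a 64-bit qdf_dma_addr_t of 0x0000000123456789,
 * __qdf_dmaaddr_to_32s() fills *lo = 0x23456789 and *hi = 0x00000001; on a
 * build where DMA addresses are 32 bits wide, *hi is always 0.
 */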
4597 
4598 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4599 {
4600 	qdf_nbuf_users_inc(&skb->users);
4601 	return skb;
4602 }
4603 qdf_export_symbol(__qdf_nbuf_inc_users);
4604 
4605 int __qdf_nbuf_get_users(struct sk_buff *skb)
4606 {
4607 	return qdf_nbuf_users_read(&skb->users);
4608 }
4609 qdf_export_symbol(__qdf_nbuf_get_users);
4610 
4611 /**
4612  * __qdf_nbuf_ref() - take a reference on the nbuf so it is held until the last free
4613  * @skb: sk_buff handle
4614  *
4615  * Return: none
4616  */
4617 
4618 void __qdf_nbuf_ref(struct sk_buff *skb)
4619 {
4620 	skb_get(skb);
4621 }
4622 qdf_export_symbol(__qdf_nbuf_ref);
4623 
4624 /**
4625  * __qdf_nbuf_shared() - Check whether the buffer is shared
4626  *  @skb: sk_buff buffer
4627  *
4628  *  Return: true if more than one person has a reference to this buffer.
4629  */
4630 int __qdf_nbuf_shared(struct sk_buff *skb)
4631 {
4632 	return skb_shared(skb);
4633 }
4634 qdf_export_symbol(__qdf_nbuf_shared);
4635 
4636 /**
4637  * __qdf_nbuf_dmamap_create() - create a DMA map.
4638  * @osdev: qdf device handle
4639  * @dmap: dma map handle
4640  *
4641  * This can later be used to map networking buffers. They:
4642  * - need space in adf_drv's software descriptor
4643  * - are typically created during adf_drv_create
4644  * - need to be created before any API(qdf_nbuf_map) that uses them
4645  *
4646  * Return: QDF STATUS
4647  */
4648 QDF_STATUS
4649 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4650 {
4651 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4652 	/*
4653 	 * driver can tell its SG capablity, it must be handled.
4654 	 * Bounce buffers if they are there
4655 	 */
4656 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4657 	if (!(*dmap))
4658 		error = QDF_STATUS_E_NOMEM;
4659 
4660 	return error;
4661 }
4662 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4663 /**
4664  * __qdf_nbuf_dmamap_destroy() - delete a dma map
4665  * @osdev: qdf device handle
4666  * @dmap: dma map handle
4667  *
4668  * Return: none
4669  */
4670 void
4671 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4672 {
4673 	kfree(dmap);
4674 }
4675 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
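/*
 * Pairing sketch (illustrative; error handling elided):
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return;
 *	...use dmap with the nbuf map APIs...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */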
4676 
4677 /**
4678  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
4679  * @osdev: os device
4680  * @skb: skb handle
4681  * @dir: dma direction
4682  * @nbytes: number of bytes to be mapped
4683  *
4684  * Return: QDF_STATUS
4685  */
4686 #ifdef QDF_OS_DEBUG
4687 QDF_STATUS
4688 __qdf_nbuf_map_nbytes(
4689 	qdf_device_t osdev,
4690 	struct sk_buff *skb,
4691 	qdf_dma_dir_t dir,
4692 	int nbytes)
4693 {
4694 	struct skb_shared_info  *sh = skb_shinfo(skb);
4695 
4696 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4697 
4698 	/*
4699 	 * Assume there's only a single fragment.
4700 	 * To support multiple fragments, it would be necessary to change
4701 	 * adf_nbuf_t to be a separate object that stores meta-info
4702 	 * (including the bus address for each fragment) and a pointer
4703 	 * to the underlying sk_buff.
4704 	 */
4705 	qdf_assert(sh->nr_frags == 0);
4706 
4707 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4708 }
4709 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4710 #else
4711 QDF_STATUS
4712 __qdf_nbuf_map_nbytes(
4713 	qdf_device_t osdev,
4714 	struct sk_buff *skb,
4715 	qdf_dma_dir_t dir,
4716 	int nbytes)
4717 {
4718 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4719 }
4720 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4721 #endif
4722 /**
4723  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4724  * @osdev: OS device
4725  * @skb: skb handle
4726  * @dir: direction
4727  * @nbytes: number of bytes
4728  *
4729  * Return: none
4730  */
4731 void
4732 __qdf_nbuf_unmap_nbytes(
4733 	qdf_device_t osdev,
4734 	struct sk_buff *skb,
4735 	qdf_dma_dir_t dir,
4736 	int nbytes)
4737 {
4738 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4739 
4740 	/*
4741 	 * Assume there's a single fragment.
4742 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
4743 	 */
4744 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4745 }
4746 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4747 
4748 /**
4749  * __qdf_nbuf_dma_map_info() - return the dma map info
4750  * @bmap: dma map
4751  * @sg: dma map info
4752  *
4753  * Return: none
4754  */
4755 void
4756 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4757 {
4758 	qdf_assert(bmap->mapped);
4759 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4760 
4761 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4762 			sizeof(struct __qdf_segment));
4763 	sg->nsegs = bmap->nsegs;
4764 }
4765 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4766 /**
4767  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4768  *			specified by the index
4769  * @skb: sk buff
4770  * @sg: scatter/gather list of all the frags
4771  *
4772  * Return: none
4773  */
4774 #if defined(__QDF_SUPPORT_FRAG_MEM)
4775 void
4776 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4777 {
4778 	struct skb_shared_info *sh = skb_shinfo(skb);
4779 	int i;
4780 
4781 	qdf_assert(skb);
4782 	sg->sg_segs[0].vaddr = skb->data;
4783 	sg->sg_segs[0].len   = skb->len;
4784 	sg->nsegs            = 1;
4785 
4786 	for (i = 1; i <= sh->nr_frags; i++) {
4787 		skb_frag_t *f = &sh->frags[i - 1];
4788 
4789 		qdf_assert(i < QDF_MAX_SGLIST);
4790 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
4791 						   f->page_offset);
4792 		sg->sg_segs[i].len = f->size;
4793 	}
4794 	sg->nsegs = i;
4795 }
4795 qdf_export_symbol(__qdf_nbuf_frag_info);
4796 #else
4797 #ifdef QDF_OS_DEBUG
4798 void
4799 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4800 {
4801 
4802 	struct skb_shared_info  *sh = skb_shinfo(skb);
4803 
4804 	qdf_assert(skb);
4805 	sg->sg_segs[0].vaddr = skb->data;
4806 	sg->sg_segs[0].len   = skb->len;
4807 	sg->nsegs            = 1;
4808 
4809 	qdf_assert(sh->nr_frags == 0);
4810 }
4811 qdf_export_symbol(__qdf_nbuf_frag_info);
4812 #else
4813 void
4814 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4815 {
4816 	sg->sg_segs[0].vaddr = skb->data;
4817 	sg->sg_segs[0].len   = skb->len;
4818 	sg->nsegs            = 1;
4819 }
4820 qdf_export_symbol(__qdf_nbuf_frag_info);
4821 #endif
4822 #endif
4823 /**
4824  * __qdf_nbuf_get_frag_size() - get frag size
4825  * @nbuf: sk buffer
4826  * @cur_frag: current frag
4827  *
4828  * Return: frag size
4829  */
4830 uint32_t
4831 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4832 {
4833 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4834 	const skb_frag_t *frag = sh->frags + cur_frag;
4835 
4836 	return skb_frag_size(frag);
4837 }
4838 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4839 
4840 /**
4841  * __qdf_nbuf_frag_map() - dma map frag
4842  * @osdev: os device
4843  * @nbuf: sk buff
4844  * @offset: offset
4845  * @dir: direction
4846  * @cur_frag: current fragment
4847  *
4848  * Return: QDF status
4849  */
4850 #ifdef A_SIMOS_DEVHOST
4851 QDF_STATUS __qdf_nbuf_frag_map(
4852 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4853 	int offset, qdf_dma_dir_t dir, int cur_frag)
4854 {
4855 	int32_t paddr, frag_len;
4856 
4857 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4858 	return QDF_STATUS_SUCCESS;
4859 }
4860 qdf_export_symbol(__qdf_nbuf_frag_map);
4861 #else
4862 QDF_STATUS __qdf_nbuf_frag_map(
4863 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4864 	int offset, qdf_dma_dir_t dir, int cur_frag)
4865 {
4866 	dma_addr_t paddr, frag_len;
4867 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4868 	const skb_frag_t *frag = sh->frags + cur_frag;
4869 
4870 	frag_len = skb_frag_size(frag);
4871 
4872 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4873 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4874 					__qdf_dma_dir_to_os(dir));
4875 	return dma_mapping_error(osdev->dev, paddr) ?
4876 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4877 }
4878 qdf_export_symbol(__qdf_nbuf_frag_map);
4879 #endif
4880 /**
4881  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4882  * @dmap: dma map
4883  * @cb: callback
4884  * @arg: argument
4885  *
4886  * Return: none
4887  */
4888 void
4889 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4890 {
4891 	return;
4892 }
4893 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4894 
4895 
4896 /**
4897  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4898  * @osdev: os device
4899  * @buf: sk buff
4900  * @dir: direction
4901  *
4902  * Return: none
4903  */
4904 #if defined(A_SIMOS_DEVHOST)
4905 static void __qdf_nbuf_sync_single_for_cpu(
4906 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4907 {
4908 	return;
4909 }
4910 #else
4911 static void __qdf_nbuf_sync_single_for_cpu(
4912 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4913 {
4914 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4915 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4916 		return;
4917 	}
4918 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4919 		skb_end_offset(buf) - skb_headroom(buf),
4920 		__qdf_dma_dir_to_os(dir));
4921 }
4922 #endif
4923 /**
4924  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4925  * @osdev: os device
4926  * @skb: sk buff
4927  * @dir: direction
4928  *
4929  * Return: none
4930  */
4931 void
4932 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4933 	struct sk_buff *skb, qdf_dma_dir_t dir)
4934 {
4935 	qdf_assert(
4936 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4937 
4938 	/*
4939 	 * Assume there's a single fragment.
4940 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
4941 	 */
4942 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4943 }
4944 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4945 
4946 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4947 /**
4948  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4949  * @rx_status: Pointer to rx_status.
4950  * @rtap_buf: Buf to which VHT info has to be updated.
4951  * @rtap_len: Current length of radiotap buffer
4952  *
4953  * Return: Length of radiotap after VHT flags updated.
4954  */
4955 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4956 					struct mon_rx_status *rx_status,
4957 					int8_t *rtap_buf,
4958 					uint32_t rtap_len)
4959 {
4960 	uint16_t vht_flags = 0;
4961 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4962 
4963 	rtap_len = qdf_align(rtap_len, 2);
4964 
4965 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4966 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4967 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4968 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4969 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4970 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4971 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4972 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4973 	rtap_len += 2;
4974 
4975 	rtap_buf[rtap_len] |=
4976 		(rx_status->is_stbc ?
4977 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4978 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4979 		(rx_status->ldpc ?
4980 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4981 		(rx_status->beamformed ?
4982 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4983 	rtap_len += 1;
4984 
4985 	if (!rx_user_status) {
4986 		switch (rx_status->vht_flag_values2) {
4987 		case IEEE80211_RADIOTAP_VHT_BW_20:
4988 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4989 			break;
4990 		case IEEE80211_RADIOTAP_VHT_BW_40:
4991 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4992 			break;
4993 		case IEEE80211_RADIOTAP_VHT_BW_80:
4994 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4995 			break;
4996 		case IEEE80211_RADIOTAP_VHT_BW_160:
4997 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4998 			break;
4999 		}
5000 		rtap_len += 1;
5001 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
5002 		rtap_len += 1;
5003 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
5004 		rtap_len += 1;
5005 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
5006 		rtap_len += 1;
5007 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
5008 		rtap_len += 1;
5009 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
5010 		rtap_len += 1;
5011 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
5012 		rtap_len += 1;
5013 		put_unaligned_le16(rx_status->vht_flag_values6,
5014 				   &rtap_buf[rtap_len]);
5015 		rtap_len += 2;
5016 	} else {
5017 		switch (rx_user_status->vht_flag_values2) {
5018 		case IEEE80211_RADIOTAP_VHT_BW_20:
5019 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5020 			break;
5021 		case IEEE80211_RADIOTAP_VHT_BW_40:
5022 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5023 			break;
5024 		case IEEE80211_RADIOTAP_VHT_BW_80:
5025 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5026 			break;
5027 		case IEEE80211_RADIOTAP_VHT_BW_160:
5028 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5029 			break;
5030 		}
5031 		rtap_len += 1;
5032 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
5033 		rtap_len += 1;
5034 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
5035 		rtap_len += 1;
5036 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
5037 		rtap_len += 1;
5038 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
5039 		rtap_len += 1;
5040 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
5041 		rtap_len += 1;
5042 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
5043 		rtap_len += 1;
5044 		put_unaligned_le16(rx_user_status->vht_flag_values6,
5045 				   &rtap_buf[rtap_len]);
5046 		rtap_len += 2;
5047 	}
5048 
5049 	return rtap_len;
5050 }
5051 
5052 /**
5053  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
5054  * @rx_status: Pointer to rx_status.
5055  * @rtap_buf: buffer to which radiotap has to be updated
5056  * @rtap_len: radiotap length
5057  *
5058  * This API updates the high-efficiency (11ax) fields in the radiotap header
5059  *
5060  * Return: length of rtap_len updated.
5061  */
5062 static unsigned int
5063 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5064 				     int8_t *rtap_buf, uint32_t rtap_len)
5065 {
5066 	/*
5067 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
5068 	 * Enable all "known" HE radiotap flags for now
5069 	 */
5070 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5071 
5072 	rtap_len = qdf_align(rtap_len, 2);
5073 
5074 	if (!rx_user_status) {
5075 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5076 		rtap_len += 2;
5077 
5078 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5079 		rtap_len += 2;
5080 
5081 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5082 		rtap_len += 2;
5083 
5084 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5085 		rtap_len += 2;
5086 
5087 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5088 		rtap_len += 2;
5089 
5090 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5091 		rtap_len += 2;
5092 		qdf_rl_debug("he data %x %x %x %x %x %x",
5093 			     rx_status->he_data1,
5094 			     rx_status->he_data2, rx_status->he_data3,
5095 			     rx_status->he_data4, rx_status->he_data5,
5096 			     rx_status->he_data6);
5097 	} else {
5098 		put_unaligned_le16(rx_user_status->he_data1,
5099 				   &rtap_buf[rtap_len]);
5100 		rtap_len += 2;
5101 
5102 		put_unaligned_le16(rx_user_status->he_data2,
5103 				   &rtap_buf[rtap_len]);
5104 		rtap_len += 2;
5105 
5106 		put_unaligned_le16(rx_user_status->he_data3,
5107 				   &rtap_buf[rtap_len]);
5108 		rtap_len += 2;
5109 
5110 		put_unaligned_le16(rx_user_status->he_data4,
5111 				   &rtap_buf[rtap_len]);
5112 		rtap_len += 2;
5113 
5114 		put_unaligned_le16(rx_user_status->he_data5,
5115 				   &rtap_buf[rtap_len]);
5116 		rtap_len += 2;
5117 
5118 		put_unaligned_le16(rx_user_status->he_data6,
5119 				   &rtap_buf[rtap_len]);
5120 		rtap_len += 2;
5121 		qdf_rl_debug("he data %x %x %x %x %x %x",
5122 			     rx_user_status->he_data1,
5123 			     rx_user_status->he_data2, rx_user_status->he_data3,
5124 			     rx_user_status->he_data4, rx_user_status->he_data5,
5125 			     rx_user_status->he_data6);
5126 	}
5127 
5128 	return rtap_len;
5129 }
5130 
5131 
5132 /**
5133  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
5134  * @rx_status: Pointer to rx_status.
5135  * @rtap_buf: buffer to which radiotap has to be updated
5136  * @rtap_len: radiotap length
5137  *
5138  * This API updates the HE-MU fields in the radiotap header
5139  *
5140  * Return: length of rtap_len updated.
5141  */
5142 static unsigned int
5143 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
5144 				     int8_t *rtap_buf, uint32_t rtap_len)
5145 {
5146 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5147 
5148 	rtap_len = qdf_align(rtap_len, 2);
5149 
5150 	/*
5151 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
5152 	 * Enable all "known" he-mu radiotap flags for now
5153 	 */
5154 
5155 	if (!rx_user_status) {
5156 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5157 		rtap_len += 2;
5158 
5159 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5160 		rtap_len += 2;
5161 
5162 		rtap_buf[rtap_len] = rx_status->he_RU[0];
5163 		rtap_len += 1;
5164 
5165 		rtap_buf[rtap_len] = rx_status->he_RU[1];
5166 		rtap_len += 1;
5167 
5168 		rtap_buf[rtap_len] = rx_status->he_RU[2];
5169 		rtap_len += 1;
5170 
5171 		rtap_buf[rtap_len] = rx_status->he_RU[3];
5172 		rtap_len += 1;
5173 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5174 			  rx_status->he_flags1,
5175 			  rx_status->he_flags2, rx_status->he_RU[0],
5176 			  rx_status->he_RU[1], rx_status->he_RU[2],
5177 			  rx_status->he_RU[3]);
5178 	} else {
5179 		put_unaligned_le16(rx_user_status->he_flags1,
5180 				   &rtap_buf[rtap_len]);
5181 		rtap_len += 2;
5182 
5183 		put_unaligned_le16(rx_user_status->he_flags2,
5184 				   &rtap_buf[rtap_len]);
5185 		rtap_len += 2;
5186 
5187 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
5188 		rtap_len += 1;
5189 
5190 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
5191 		rtap_len += 1;
5192 
5193 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
5194 		rtap_len += 1;
5195 
5196 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
5197 		rtap_len += 1;
5198 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5199 			  rx_user_status->he_flags1,
5200 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
5201 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
5202 			  rx_user_status->he_RU[3]);
5203 	}
5204 
5205 	return rtap_len;
5206 }
5207 
5208 /**
5209  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
5210  * @rx_status: Pointer to rx_status.
5211  * @rtap_buf: buffer to which radiotap has to be updated
5212  * @rtap_len: radiotap length
5213  *
5214  * This API updates the HE-MU-other fields in the radiotap header
5215  *
5216  * Return: length of rtap_len updated.
5217  */
5218 static unsigned int
5219 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
5220 				     int8_t *rtap_buf, uint32_t rtap_len)
5221 {
5222 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5223 
5224 	rtap_len = qdf_align(rtap_len, 2);
5225 
5226 	/*
5227 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
5228 	 * Enable all "known" he-mu-other radiotap flags for now
5229 	 */
5230 	if (!rx_user_status) {
5231 		put_unaligned_le16(rx_status->he_per_user_1,
5232 				   &rtap_buf[rtap_len]);
5233 		rtap_len += 2;
5234 
5235 		put_unaligned_le16(rx_status->he_per_user_2,
5236 				   &rtap_buf[rtap_len]);
5237 		rtap_len += 2;
5238 
5239 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
5240 		rtap_len += 1;
5241 
5242 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
5243 		rtap_len += 1;
5244 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5245 			  rx_status->he_per_user_1,
5246 			  rx_status->he_per_user_2,
5247 			  rx_status->he_per_user_position,
5248 			  rx_status->he_per_user_known);
5249 	} else {
5250 		put_unaligned_le16(rx_user_status->he_per_user_1,
5251 				   &rtap_buf[rtap_len]);
5252 		rtap_len += 2;
5253 
5254 		put_unaligned_le16(rx_user_status->he_per_user_2,
5255 				   &rtap_buf[rtap_len]);
5256 		rtap_len += 2;
5257 
5258 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5259 		rtap_len += 1;
5260 
5261 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5262 		rtap_len += 1;
5263 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5264 			  rx_user_status->he_per_user_1,
5265 			  rx_user_status->he_per_user_2,
5266 			  rx_user_status->he_per_user_position,
5267 			  rx_user_status->he_per_user_known);
5268 	}
5269 
5270 	return rtap_len;
5271 }
5272 
5273 /**
5274  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5275  *						from rx_status
5276  * @rx_status: Pointer to rx_status.
5277  * @rtap_buf: buffer to which radiotap has to be updated
5278  * @rtap_len: radiotap length
5279  *
5280  * This API updates the U-SIG (11be) fields in the radiotap header
5281  *
5282  * Return: length of rtap_len updated.
5283  */
5284 static unsigned int
5285 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5286 				    int8_t *rtap_buf, uint32_t rtap_len)
5287 {
5288 	/*
5289 	 * IEEE80211_RADIOTAP_USIG:
5290 	 *		u32, u32, u32
5291 	 */
5292 	rtap_len = qdf_align(rtap_len, 4);
5293 
5294 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5295 	rtap_len += 4;
5296 
5297 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5298 	rtap_len += 4;
5299 
5300 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5301 	rtap_len += 4;
5302 
5303 	qdf_rl_debug("U-SIG data %x %x %x",
5304 		     rx_status->usig_common, rx_status->usig_value,
5305 		     rx_status->usig_mask);
5306 
5307 	return rtap_len;
5308 }
5309 
5310 /**
5311  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
5312  *					from rx_status
5313  * @rx_status: Pointer to rx_status.
5314  * @rtap_buf: buffer to which radiotap has to be updated
5315  * @rtap_len: radiotap length
5316  *
5317  * This API updates the EHT (11be) fields in the radiotap header
5318  *
5319  * Return: length of rtap_len updated.
5320  */
5321 static unsigned int
5322 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5323 				   int8_t *rtap_buf, uint32_t rtap_len)
5324 {
5325 	uint32_t user;
5326 
5327 	/*
5328 	 * IEEE80211_RADIOTAP_EHT:
5329 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5330 	 */
5331 	rtap_len = qdf_align(rtap_len, 4);
5332 
5333 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5334 	rtap_len += 4;
5335 
5336 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5337 	rtap_len += 4;
5338 
5339 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5340 	rtap_len += 4;
5341 
5342 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5343 	rtap_len += 4;
5344 
5345 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5346 	rtap_len += 4;
5347 
5348 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5349 	rtap_len += 4;
5350 
5351 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5352 	rtap_len += 4;
5353 
5354 	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
5355 		put_unaligned_le32(rx_status->eht_user_info[user],
5356 				   &rtap_buf[rtap_len]);
5357 		rtap_len += 4;
5358 	}
5359 
5360 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5361 		     rx_status->eht_known, rx_status->eht_data[0],
5362 		     rx_status->eht_data[1], rx_status->eht_data[2],
5363 		     rx_status->eht_data[3], rx_status->eht_data[4],
5364 		     rx_status->eht_data[5]);
5365 
5366 	return rtap_len;
5367 }
5368 
5369 #define IEEE80211_RADIOTAP_TX_STATUS 0
5370 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5371 #define IEEE80211_RADIOTAP_EXTENSION2 2
5372 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5373 
5374 /**
5375  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5376  * @rx_status: Pointer to rx_status.
5377  * @rtap_buf: Buf to which AMPDU info has to be updated.
5378  * @rtap_len: Current length of radiotap buffer
5379  *
5380  * Return: Length of radiotap after AMPDU flags updated.
5381  */
5382 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5383 					struct mon_rx_status *rx_status,
5384 					uint8_t *rtap_buf,
5385 					uint32_t rtap_len)
5386 {
5387 	/*
5388 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5389 	 * First 32 bits of AMPDU represents the reference number
5390 	 */
5391 
5392 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5393 	uint16_t ampdu_flags = 0;
5394 	uint16_t ampdu_reserved_flags = 0;
5395 
5396 	rtap_len = qdf_align(rtap_len, 4);
5397 
5398 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5399 	rtap_len += 4;
5400 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5401 	rtap_len += 2;
5402 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5403 	rtap_len += 2;
5404 
5405 	return rtap_len;
5406 }
5407 
5408 #ifdef DP_MON_RSSI_IN_DBM
5409 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5410 (rx_status->rssi_comb)
5411 #else
5412 #ifdef QCA_RSSI_DB2DBM
5413 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5414 (((rx_status)->rssi_dbm_conv_support) ? \
5415 ((rx_status)->rssi_comb + (rx_status)->min_nf_dbm +\
5416 (rx_status)->rssi_temp_offset) : \
5417 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5418 #else
5419 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5420 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5421 #endif
5422 #endif
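/*
 * Example (illustrative, default path without DP_MON_RSSI_IN_DBM or
 * QCA_RSSI_DB2DBM): rssi_comb = 40 dB above the noise floor and
 * chan_noise_floor = -96 dBm give QDF_MON_STATUS_GET_RSSI_IN_DBM() =
 * 40 + (-96) = -56 dBm, which is the value written into the
 * IEEE80211_RADIOTAP_DBM_ANTSIGNAL field below.
 */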
5423 
5424 /**
5425  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5426  * @rx_status: Pointer to rx_status.
5427  * @rtap_buf: Buf to which tx info has to be updated.
5428  * @rtap_len: Current length of radiotap buffer
5429  *
5430  * Return: Length of radiotap after tx flags updated.
5431  */
5432 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5433 						struct mon_rx_status *rx_status,
5434 						uint8_t *rtap_buf,
5435 						uint32_t rtap_len)
5436 {
5437 	/*
5438 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5439 	 */
5440 
5441 	uint16_t tx_flags = 0;
5442 
5443 	rtap_len = qdf_align(rtap_len, 2);
5444 
5445 	switch (rx_status->tx_status) {
5446 	case RADIOTAP_TX_STATUS_FAIL:
5447 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5448 		break;
5449 	case RADIOTAP_TX_STATUS_NOACK:
5450 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5451 		break;
5452 	}
5453 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5454 	rtap_len += 2;
5455 
5456 	return rtap_len;
5457 }
5458 
5459 /**
5460  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
5461  * @rx_status: Pointer to rx_status.
5462  * @nbuf:      nbuf pointer to which radiotap has to be updated
5463  * @headroom_sz: Available headroom size.
5464  *
5465  * Return: length of rtap_len updated.
5466  */
5467 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5468 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5469 {
5470 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5471 	struct ieee80211_radiotap_header *rthdr =
5472 		(struct ieee80211_radiotap_header *)rtap_buf;
5473 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5474 	uint32_t rtap_len = rtap_hdr_len;
5475 	uint8_t length = rtap_len;
5476 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5477 	struct qdf_radiotap_ext2 *rtap_ext2;
5478 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5479 
5480 	/* per user info */
5481 	qdf_le32_t *it_present;
5482 	uint32_t it_present_val;
5483 	bool radiotap_ext1_hdr_present = false;
5484 
5485 	it_present = &rthdr->it_present;
5486 
5487 	/* Adding Extended Header space */
5488 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5489 	    rx_status->usig_flags || rx_status->eht_flags) {
5490 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5491 		rtap_len = rtap_hdr_len;
5492 		radiotap_ext1_hdr_present = true;
5493 	}
5494 
5495 	length = rtap_len;
5496 
5497 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5498 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5499 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5500 	rtap_len += 8;
5501 
5502 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5503 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5504 
5505 	if (rx_status->rs_fcs_err)
5506 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5507 
5508 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5509 	rtap_len += 1;
5510 
5511 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5512 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5513 	    !rx_status->he_flags) {
5514 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5515 		rtap_buf[rtap_len] = rx_status->rate;
5516 	} else
5517 		rtap_buf[rtap_len] = 0;
5518 	rtap_len += 1;
5519 
5520 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5521 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5522 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5523 	rtap_len += 2;
5524 	/* Channel flags. */
5525 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5526 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5527 	else
5528 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5529 	if (rx_status->cck_flag)
5530 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5531 	if (rx_status->ofdm_flag)
5532 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5533 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5534 	rtap_len += 2;
5535 
5536 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5537 	 *					(dBm)
5538 	 */
5539 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5540 	/*
5541 	 * rssi_comb is int dB, need to convert it to dBm.
5542 	 * normalize value to noise floor of -96 dBm
5543 	 */
5544 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5545 	rtap_len += 1;
5546 
5547 	/* RX signal noise floor */
5548 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5549 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5550 	rtap_len += 1;
5551 
5552 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5553 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5554 	rtap_buf[rtap_len] = rx_status->nr_ant;
5555 	rtap_len += 1;
5556 
5557 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5558 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5559 		return 0;
5560 	}
5561 
5562 	/* update tx flags for pkt capture*/
5563 	if (rx_status->add_rtap_ext) {
5564 		rthdr->it_present |=
5565 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5566 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5567 							     rtap_buf,
5568 							     rtap_len);
5569 
5570 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5571 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5572 			return 0;
5573 		}
5574 	}
5575 
5576 	if (rx_status->ht_flags) {
5577 		length = rtap_len;
5578 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
5579 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5580 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5581 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5582 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5583 		rtap_len += 1;
5584 
5585 		if (rx_status->sgi)
5586 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5587 		if (rx_status->bw)
5588 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5589 		else
5590 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5591 		rtap_len += 1;
5592 
5593 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5594 		rtap_len += 1;
5595 
5596 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5597 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5598 			return 0;
5599 		}
5600 	}
5601 
5602 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5603 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5604 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5605 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5606 								rtap_buf,
5607 								rtap_len);
5608 	}
5609 
5610 	if (rx_status->vht_flags) {
5611 		length = rtap_len;
5612 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5613 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5614 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5615 								rtap_buf,
5616 								rtap_len);
5617 
5618 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5619 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5620 			return 0;
5621 		}
5622 	}
5623 
5624 	if (rx_status->he_flags) {
5625 		length = rtap_len;
5626 		/* IEEE80211_RADIOTAP_HE */
5627 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5628 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5629 								rtap_buf,
5630 								rtap_len);
5631 
5632 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5633 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5634 			return 0;
5635 		}
5636 	}
5637 
5638 	if (rx_status->he_mu_flags) {
5639 		length = rtap_len;
5640 		/* IEEE80211_RADIOTAP_HE-MU */
5641 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5642 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5643 								rtap_buf,
5644 								rtap_len);
5645 
5646 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5647 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5648 			return 0;
5649 		}
5650 	}
5651 
5652 	if (rx_status->he_mu_other_flags) {
5653 		length = rtap_len;
5654 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5655 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5656 		rtap_len =
5657 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5658 								rtap_buf,
5659 								rtap_len);
5660 
5661 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5662 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5663 			return 0;
5664 		}
5665 	}
5666 
5667 	rtap_len = qdf_align(rtap_len, 2);
5668 	/*
5669 	 * Radiotap Vendor Namespace
5670 	 */
5671 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5672 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5673 					(rtap_buf + rtap_len);
5674 	/*
5675 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5676 	 */
5677 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5678 	/*
5679 	 * Name space selector = 0
5680 	 * We only will have one namespace for now
5681 	 * We will only have one namespace for now
5682 	radiotap_vendor_ns_ath->hdr.selector = 0;
5683 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5684 					sizeof(*radiotap_vendor_ns_ath) -
5685 					sizeof(radiotap_vendor_ns_ath->hdr));
5686 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5687 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5688 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5689 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5690 				cpu_to_le32(rx_status->ppdu_timestamp);
5691 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5692 
5693 	/* Move to next it_present */
5694 	if (radiotap_ext1_hdr_present) {
5695 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5696 		put_unaligned_le32(it_present_val, it_present);
5697 		it_present_val = 0;
5698 		it_present++;
5699 	}
5700 
5701 	/* Add Extension to Radiotap Header & corresponding data */
5702 	if (rx_status->add_rtap_ext) {
5703 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5704 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5705 
5706 		rtap_buf[rtap_len] = rx_status->tx_status;
5707 		rtap_len += 1;
5708 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5709 		rtap_len += 1;
5710 	}
5711 
5712 	/* Add Extension2 to Radiotap Header */
5713 	if (rx_status->add_rtap_ext2) {
5714 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5715 
5716 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5717 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5718 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5719 		if (!rx_user_status) {
5720 			rtap_ext2->tid = rx_status->tid;
5721 			rtap_ext2->start_seq = rx_status->start_seq;
5722 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5723 				     rx_status->ba_bitmap,
5724 				     8 * (sizeof(uint32_t)));
5725 		} else {
5726 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5727 
5728 			/* set default bitmap sz if not set */
5729 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5730 			rtap_ext2->tid = rx_user_status->tid;
5731 			rtap_ext2->start_seq = rx_user_status->start_seq;
5732 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5733 				     rx_user_status->ba_bitmap,
5734 				     ba_bitmap_sz * (sizeof(uint32_t)));
5735 		}
5736 
5737 		rtap_len += sizeof(*rtap_ext2);
5738 	}
5739 
5740 	if (rx_status->usig_flags) {
5741 		length = rtap_len;
5742 		/* IEEE80211_RADIOTAP_USIG */
5743 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5744 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5745 							       rtap_buf,
5746 							       rtap_len);
5747 
5748 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5749 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5750 			return 0;
5751 		}
5752 	}
5753 
5754 	if (rx_status->eht_flags) {
5755 		length = rtap_len;
5756 		/* IEEE80211_RADIOTAP_EHT */
5757 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5758 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5759 							      rtap_buf,
5760 							      rtap_len);
5761 
5762 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5763 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5764 			return 0;
5765 		}
5766 	}
5767 
5768 	put_unaligned_le32(it_present_val, it_present);
5769 	rthdr->it_len = cpu_to_le16(rtap_len);
5770 
5771 	if (headroom_sz < rtap_len) {
5772 		qdf_debug("DEBUG: Not enough space to update radiotap");
5773 		return 0;
5774 	}
5775 
5776 	qdf_nbuf_push_head(nbuf, rtap_len);
5777 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5778 	return rtap_len;
5779 }
5780 #else
5781 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5782 					struct mon_rx_status *rx_status,
5783 					int8_t *rtap_buf,
5784 					uint32_t rtap_len)
5785 {
5786 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5787 	return 0;
5788 }
5789 
5790 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5791 				      int8_t *rtap_buf, uint32_t rtap_len)
5792 {
5793 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5794 	return 0;
5795 }
5796 
5797 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5798 					struct mon_rx_status *rx_status,
5799 					uint8_t *rtap_buf,
5800 					uint32_t rtap_len)
5801 {
5802 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5803 	return 0;
5804 }
5805 
5806 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5807 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5808 {
5809 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5810 	return 0;
5811 }
5812 #endif
5813 qdf_export_symbol(qdf_nbuf_update_radiotap);
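
/*
 * Editorial illustration (not part of the driver): a minimal sketch of how a
 * monitor-mode RX path might prepend a radiotap header with
 * qdf_nbuf_update_radiotap(). Only a few mon_rx_status fields are filled for
 * illustration; the caller name, field values and use of the full headroom
 * are assumptions.
 */
#if 0	/* illustration only, not compiled */
static void mon_deliver_example(qdf_nbuf_t msdu, uint32_t chan_freq)
{
	struct mon_rx_status rx_status = {0};
	unsigned int rtap_len;

	rx_status.chan_freq = chan_freq;	/* primary channel in MHz */
	rx_status.rate = 12;			/* 6 Mbps in 500 kb/s units */
	rx_status.nr_ant = 1;

	/* prepends the radiotap header into the nbuf headroom */
	rtap_len = qdf_nbuf_update_radiotap(&rx_status, msdu,
					    qdf_nbuf_headroom(msdu));
	if (!rtap_len)
		qdf_debug("radiotap update failed, not enough headroom");
}
#endif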
5814 
5815 /**
5816  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
5817  * @cb_func_ptr: function pointer to the nbuf free callback
5818  *
5819  * This function registers a callback function for nbuf free.
5820  *
5821  * Return: none
5822  */
5823 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5824 {
5825 	nbuf_free_cb = cb_func_ptr;
5826 }
5827 
5828 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
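
/*
 * Editorial illustration (not part of the driver): registering an nbuf free
 * callback. The callback prototype is assumed to match qdf_nbuf_free_t,
 * i.e. it receives the nbuf being freed; both function names below are
 * hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void example_nbuf_free_notify(qdf_nbuf_t nbuf)
{
	/* e.g. account the buffer against a private pool before it is freed */
}

static void example_register_free_cb(void)
{
	__qdf_nbuf_reg_free_cb(example_nbuf_free_notify);
}
#endif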
5829 
5830 /**
5831  * qdf_nbuf_classify_pkt() - classify packet
5832  * @skb: sk buff
5833  *
5834  * Return: none
5835  */
5836 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5837 {
5838 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5839 
5840 	/* check destination mac address is broadcast/multicast */
5841 	if (is_broadcast_ether_addr((uint8_t *)eh))
5842 		QDF_NBUF_CB_SET_BCAST(skb);
5843 	else if (is_multicast_ether_addr((uint8_t *)eh))
5844 		QDF_NBUF_CB_SET_MCAST(skb);
5845 
5846 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5847 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5848 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5849 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5850 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5851 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5852 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5853 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5854 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5855 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5856 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5857 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5858 }
5859 qdf_export_symbol(qdf_nbuf_classify_pkt);
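
/*
 * Editorial illustration (not part of the driver): classifying a TX skb whose
 * data pointer sits at the ethernet header, then branching on the packet type
 * stored in the nbuf control block. The wrapper function is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void example_classify_and_mark(struct sk_buff *skb)
{
	qdf_nbuf_classify_pkt(skb);

	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) == QDF_NBUF_CB_PACKET_TYPE_EAPOL)
		qdf_debug("EAPOL frame, candidate for higher TX priority");
}
#endif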
5860 
5861 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5862 {
5863 	qdf_nbuf_users_set(&nbuf->users, 1);
5864 	nbuf->data = nbuf->head + NET_SKB_PAD;
5865 	skb_reset_tail_pointer(nbuf);
5866 }
5867 qdf_export_symbol(__qdf_nbuf_init);
5868 
5869 #ifdef WLAN_FEATURE_FASTPATH
5870 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5871 {
5872 	qdf_nbuf_users_set(&nbuf->users, 1);
5873 	nbuf->data = nbuf->head + NET_SKB_PAD;
5874 	skb_reset_tail_pointer(nbuf);
5875 }
5876 qdf_export_symbol(qdf_nbuf_init_fast);
5877 #endif /* WLAN_FEATURE_FASTPATH */
5878 
5879 
5880 #ifdef QDF_NBUF_GLOBAL_COUNT
5881 /**
5882  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
5883  *
5884  * Return: void
5885  */
5886 void __qdf_nbuf_mod_init(void)
5887 {
5888 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5889 	qdf_atomic_init(&nbuf_count);
5890 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5891 }
5892 
5893 /**
5894  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
5895  *
5896  * Return: void
5897  */
5898 void __qdf_nbuf_mod_exit(void)
5899 {
5900 }
5901 #endif
5902 
5903 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5904 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5905 					    int offset)
5906 {
5907 	unsigned int frag_offset;
5908 	skb_frag_t *frag;
5909 
5910 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5911 		return QDF_STATUS_E_FAILURE;
5912 
5913 	frag = &skb_shinfo(nbuf)->frags[idx];
5914 	frag_offset = skb_frag_off(frag);
5915 
5916 	frag_offset += offset;
5917 	skb_frag_off_set(frag, frag_offset);
5918 
5919 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5920 
5921 	return QDF_STATUS_SUCCESS;
5922 }
5923 
5924 #else
5925 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5926 					    int offset)
5927 {
5928 	uint16_t frag_offset;
5929 	skb_frag_t *frag;
5930 
5931 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5932 		return QDF_STATUS_E_FAILURE;
5933 
5934 	frag = &skb_shinfo(nbuf)->frags[idx];
5935 	frag_offset = frag->page_offset;
5936 
5937 	frag_offset += offset;
5938 	frag->page_offset = frag_offset;
5939 
5940 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5941 
5942 	return QDF_STATUS_SUCCESS;
5943 }
5944 #endif
5945 
5946 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
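
/*
 * Editorial illustration (not part of the driver): stripping 4 bytes from the
 * front of frag 0 by moving its page offset forward; the helper then trims
 * the frag size by the same amount. The caller name and the 4-byte header it
 * assumes are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static QDF_STATUS example_strip_frag_hdr(qdf_nbuf_t nbuf)
{
	/* advance the data start of frag 0 past a 4-byte HW header */
	return __qdf_nbuf_move_frag_page_offset(nbuf, 0, 4);
}
#endif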
5947 
5948 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
5949 			    uint16_t idx,
5950 			    uint16_t truesize)
5951 {
5952 	struct page *page;
5953 	uint16_t frag_len;
5954 
5955 	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
5956 
5957 	if (qdf_unlikely(!page))
5958 		return;
5959 
5960 	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
5961 	put_page(page);
5962 	nbuf->len -= frag_len;
5963 	nbuf->data_len -= frag_len;
5964 	nbuf->truesize -= truesize;
5965 	skb_shinfo(nbuf)->nr_frags--;
5966 }
5967 
5968 qdf_export_symbol(__qdf_nbuf_remove_frag);
5969 
5970 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5971 			    int offset, int frag_len,
5972 			    unsigned int truesize, bool take_frag_ref)
5973 {
5974 	struct page *page;
5975 	int frag_offset;
5976 	uint8_t nr_frag;
5977 
5978 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5979 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5980 
5981 	page = virt_to_head_page(buf);
5982 	frag_offset = buf - page_address(page);
5983 
5984 	skb_add_rx_frag(nbuf, nr_frag, page,
5985 			(frag_offset + offset),
5986 			frag_len, truesize);
5987 
5988 	if (unlikely(take_frag_ref)) {
5989 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5990 		skb_frag_ref(nbuf, nr_frag);
5991 	}
5992 }
5993 
5994 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
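
/*
 * Editorial illustration (not part of the driver): attaching an existing RX
 * buffer to an nbuf as a page frag. take_frag_ref is true here because the
 * buffer is assumed to be owned by another nbuf already; names and lengths
 * are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void example_share_frag(__qdf_nbuf_t dst, __qdf_frag_t buf, int len)
{
	/* map buf into dst as a frag backed by the same page */
	__qdf_nbuf_add_rx_frag(buf, dst, 0 /* offset */, len,
			       len /* truesize */, true /* take ref */);
}
#endif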
5995 
5996 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
5997 {
5998 	struct page *page;
5999 	skb_frag_t frag = {0};
6000 
6001 	page = virt_to_head_page(buf);
6002 	__skb_frag_set_page(&frag, page);
6003 
6004 	/*
6005 	 * since __skb_frag_ref() just uses the page to increase the ref
6006 	 * count, we only need to decode the page here
6007 	 */
6008 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
6009 	__skb_frag_ref(&frag);
6010 }
6011 
6012 qdf_export_symbol(__qdf_nbuf_ref_frag);
6013 
6014 #ifdef NBUF_FRAG_MEMORY_DEBUG
6015 
6016 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
6017 						int offset, const char *func,
6018 						uint32_t line)
6019 {
6020 	QDF_STATUS result;
6021 	qdf_frag_t p_fragp, n_fragp;
6022 
6023 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
6024 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
6025 
6026 	if (qdf_likely(is_initial_mem_debug_disabled))
6027 		return result;
6028 
6029 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
6030 
6031 	/*
6032 	 * Update frag address in frag debug tracker
6033 	 * when frag offset is successfully changed in skb
6034 	 */
6035 	if (result == QDF_STATUS_SUCCESS)
6036 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
6037 
6038 	return result;
6039 }
6040 
6041 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
6042 
6043 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
6044 				int offset, int frag_len,
6045 				unsigned int truesize, bool take_frag_ref,
6046 				const char *func, uint32_t line)
6047 {
6048 	qdf_frag_t fragp;
6049 	uint32_t num_nr_frags;
6050 
6051 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
6052 			       frag_len, truesize, take_frag_ref);
6053 
6054 	if (qdf_likely(is_initial_mem_debug_disabled))
6055 		return;
6056 
6057 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
6058 
6059 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6060 
6061 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
6062 
6063 	/* Update frag address in frag debug tracking table */
6064 	if (fragp != buf)
6065 		qdf_frag_debug_update_addr(buf, fragp, func, line);
6066 
6067 	/* Update frag refcount in frag debug tracking table */
6068 	qdf_frag_debug_refcount_inc(fragp, func, line);
6069 }
6070 
6071 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
6072 
6073 /**
6074  * qdf_nbuf_ref_frag_debug() - take a reference on a frag
6075  * @buf: Frag pointer on which to take a reference
6076  * @func: Caller function name
6077  * @line: Caller function line no.
6078  */
6079 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
6080 {
6081 	__qdf_nbuf_ref_frag(buf);
6082 
6083 	if (qdf_likely(is_initial_mem_debug_disabled))
6084 		return;
6085 
6086 	/* Update frag refcount in frag debug tracking table */
6087 	qdf_frag_debug_refcount_inc(buf, func, line);
6088 }
6089 
6090 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
6091 
6092 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
6093 				    uint32_t line)
6094 {
6095 	uint32_t num_nr_frags;
6096 	uint32_t idx = 0;
6097 	qdf_nbuf_t ext_list;
6098 	qdf_frag_t p_frag;
6099 
6100 	if (qdf_likely(is_initial_mem_debug_disabled))
6101 		return;
6102 
6103 	if (qdf_unlikely(!buf))
6104 		return;
6105 
6106 	/* Take care to update the refcount in the debug entries for frags */
6107 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6108 
6109 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6110 
6111 	while (idx < num_nr_frags) {
6112 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6113 		if (qdf_likely(p_frag))
6114 			qdf_frag_debug_refcount_inc(p_frag, func, line);
6115 		idx++;
6116 	}
6117 
6118 	/*
6119 	 * Take care to update the refcount in the debug entries for the
6120 	 * frags attached to frag_list
6121 	 */
6122 	ext_list = qdf_nbuf_get_ext_list(buf);
6123 	while (ext_list) {
6124 		idx = 0;
6125 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6126 
6127 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6128 
6129 		while (idx < num_nr_frags) {
6130 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6131 			if (qdf_likely(p_frag))
6132 				qdf_frag_debug_refcount_inc(p_frag, func, line);
6133 			idx++;
6134 		}
6135 		ext_list = qdf_nbuf_queue_next(ext_list);
6136 	}
6137 }
6138 
6139 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
6140 
6141 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
6142 				    uint32_t line)
6143 {
6144 	uint32_t num_nr_frags;
6145 	qdf_nbuf_t ext_list;
6146 	uint32_t idx = 0;
6147 	qdf_frag_t p_frag;
6148 
6149 	if (qdf_likely(is_initial_mem_debug_disabled))
6150 		return;
6151 
6152 	if (qdf_unlikely(!buf))
6153 		return;
6154 
6155 	/*
6156 	 * Decrement the refcount for frag debug nodes only when the last user
6157 	 * of the nbuf calls this API, so as to avoid decrementing the refcount
6158 	 * on every call except the last one when the nbuf has multiple
6159 	 * users
6160 	 */
6161 	if (qdf_nbuf_get_users(buf) > 1)
6162 		return;
6163 
6164 	/* Take care to update the refcount in the debug entries for frags */
6165 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6166 
6167 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6168 
6169 	while (idx < num_nr_frags) {
6170 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6171 		if (qdf_likely(p_frag))
6172 			qdf_frag_debug_refcount_dec(p_frag, func, line);
6173 		idx++;
6174 	}
6175 
6176 	/* Take care to update debug entries for frags attached to frag_list */
6177 	ext_list = qdf_nbuf_get_ext_list(buf);
6178 	while (ext_list) {
6179 		if (qdf_nbuf_get_users(ext_list) == 1) {
6180 			idx = 0;
6181 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6182 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6183 			while (idx < num_nr_frags) {
6184 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6185 				if (qdf_likely(p_frag))
6186 					qdf_frag_debug_refcount_dec(p_frag,
6187 								    func, line);
6188 				idx++;
6189 			}
6190 		}
6191 		ext_list = qdf_nbuf_queue_next(ext_list);
6192 	}
6193 }
6194 
6195 qdf_export_symbol(qdf_net_buf_debug_release_frag);
6196 
6197 /**
6198  * qdf_nbuf_remove_frag_debug() - Remove frag from nbuf
6199  * @nbuf: nbuf from which the frag will be removed
6200  * @idx: frag index
6201  * @truesize: truesize of frag
6202  * @func: Caller function name
6203  * @line: Caller function line no.
6204  *
6205  * Return: QDF_STATUS
6206  *
6207  */
6208 QDF_STATUS
6209 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
6210 			   uint16_t idx,
6211 			   uint16_t truesize,
6212 			   const char *func,
6213 			   uint32_t line)
6214 {
6215 	uint16_t num_frags;
6216 	qdf_frag_t frag;
6217 
6218 	if (qdf_unlikely(!nbuf))
6219 		return QDF_STATUS_E_INVAL;
6220 
6221 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6222 	if (idx >= num_frags)
6223 		return QDF_STATUS_E_INVAL;
6224 
6225 	if (qdf_likely(is_initial_mem_debug_disabled)) {
6226 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6227 		return QDF_STATUS_SUCCESS;
6228 	}
6229 
6230 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
6231 	if (qdf_likely(frag))
6232 		qdf_frag_debug_refcount_dec(frag, func, line);
6233 
6234 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6235 
6236 	return QDF_STATUS_SUCCESS;
6237 }
6238 
6239 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6240 
6241 #endif /* NBUF_FRAG_MEMORY_DEBUG */
6242 
6243 /**
6244  * qdf_get_nbuf_valid_frag() - Get an nbuf with room to store a frag
6245  * @nbuf: qdf_nbuf_t master nbuf
6246  *
6247  * Return: qdf_nbuf_t
6248  */
6249 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
6250 {
6251 	qdf_nbuf_t last_nbuf;
6252 	uint32_t num_frags;
6253 
6254 	if (qdf_unlikely(!nbuf))
6255 		return NULL;
6256 
6257 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6258 
6259 	/* Check whether the nbuf has room for another frag */
6260 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6261 		return nbuf;
6262 
6263 	if (!__qdf_nbuf_has_fraglist(nbuf))
6264 		return NULL;
6265 
6266 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
6267 	if (qdf_unlikely(!last_nbuf))
6268 		return NULL;
6269 
6270 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
6271 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6272 		return last_nbuf;
6273 
6274 	return NULL;
6275 }
6276 
6277 qdf_export_symbol(qdf_get_nbuf_valid_frag);
6278 
6279 /**
6280  * qdf_nbuf_add_frag_debug() - Add frag to nbuf
6281  * @osdev: Device handle
6282  * @buf: Frag pointer needs to be added in nbuf frag
6283  * @nbuf: qdf_nbuf_t where frag will be added
6284  * @offset: Offset in frag to be added to nbuf_frags
6285  * @frag_len: Frag length
6286  * @truesize: truesize
6287  * @take_frag_ref: Whether to take ref for frag or not
6288  *      This bool must be set as per the below condition:
6289  *      1. False: If this frag is being added to any nbuf
6290  *              for the first time after allocation
6291  *      2. True: If the frag is already attached as part of
6292  *              another nbuf
6293  * @minsize: Minimum size to allocate
6294  * @func: Caller function name
6295  * @line: Caller function line no.
6296  *
6297  * If the number of frags exceeds the maximum frag array size, a new nbuf
6298  * is allocated with minimum headroom and the frag is added to that nbuf.
6299  * The new nbuf is added as a frag_list to the master nbuf.
6300  *
6301  * Return: QDF_STATUS
6302  */
6303 QDF_STATUS
6304 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6305 			qdf_nbuf_t nbuf, int offset,
6306 			int frag_len, unsigned int truesize,
6307 			bool take_frag_ref, unsigned int minsize,
6308 			const char *func, uint32_t line)
6309 {
6310 	qdf_nbuf_t cur_nbuf;
6311 	qdf_nbuf_t this_nbuf;
6312 
6313 	cur_nbuf = nbuf;
6314 	this_nbuf = nbuf;
6315 
6316 	if (qdf_unlikely(!frag_len || !buf)) {
6317 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6318 			     func, line,
6319 			     buf, frag_len);
6320 		return QDF_STATUS_E_INVAL;
6321 	}
6322 
6323 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6324 
6325 	if (this_nbuf) {
6326 		cur_nbuf = this_nbuf;
6327 	} else {
6328 		/* allocate a dummy mpdu buffer with minsize bytes of headroom */
6329 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6330 		if (qdf_unlikely(!this_nbuf)) {
6331 			qdf_nofl_err("%s : %d no memory to allocate\n",
6332 				     func, line);
6333 			return QDF_STATUS_E_NOMEM;
6334 		}
6335 	}
6336 
6337 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6338 			     take_frag_ref);
6339 
6340 	if (this_nbuf != cur_nbuf) {
6341 		/* add new skb to frag list */
6342 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6343 					 qdf_nbuf_len(this_nbuf));
6344 	}
6345 
6346 	return QDF_STATUS_SUCCESS;
6347 }
6348 
6349 qdf_export_symbol(qdf_nbuf_add_frag_debug);
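
/*
 * Editorial illustration (not part of the driver): attaching a freshly
 * allocated RX fragment to a master nbuf. take_frag_ref is false because this
 * is the frag's first attachment; osdev, buffer source, lengths and the
 * 256-byte minsize for an overflow nbuf are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static QDF_STATUS example_attach_frag(qdf_device_t osdev, qdf_nbuf_t master,
				      qdf_frag_t rx_buf, int rx_len)
{
	return qdf_nbuf_add_frag_debug(osdev, rx_buf, master,
				       0,	/* offset within the frag */
				       rx_len,	/* frag length */
				       rx_len,	/* truesize */
				       false,	/* first attach, no extra ref */
				       256,	/* minsize for overflow nbuf */
				       __func__, __LINE__);
}
#endif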
6350 
6351 #ifdef MEMORY_DEBUG
6352 void qdf_nbuf_acquire_track_lock(uint32_t index,
6353 				 unsigned long irq_flag)
6354 {
6355 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6356 			  irq_flag);
6357 }
6358 
6359 void qdf_nbuf_release_track_lock(uint32_t index,
6360 				 unsigned long irq_flag)
6361 {
6362 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6363 			       irq_flag);
6364 }
6365 
6366 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6367 {
6368 	return gp_qdf_net_buf_track_tbl[index];
6369 }
6370 #endif /* MEMORY_DEBUG */
6371 
6372 #ifdef ENHANCED_OS_ABSTRACTION
6373 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6374 {
6375 	__qdf_nbuf_set_timestamp(buf);
6376 }
6377 
6378 qdf_export_symbol(qdf_nbuf_set_timestamp);
6379 
6380 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6381 {
6382 	return __qdf_nbuf_get_timestamp(buf);
6383 }
6384 
6385 qdf_export_symbol(qdf_nbuf_get_timestamp);
6386 
6387 uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
6388 {
6389 	return __qdf_nbuf_get_timestamp_us(buf);
6390 }
6391 
6392 qdf_export_symbol(qdf_nbuf_get_timestamp_us);
6393 
6394 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6395 {
6396 	return __qdf_nbuf_get_timedelta_us(buf);
6397 }
6398 
6399 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6400 
6401 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6402 {
6403 	return __qdf_nbuf_get_timedelta_ms(buf);
6404 }
6405 
6406 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6407 
6408 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6409 {
6410 	return __qdf_nbuf_net_timedelta(t);
6411 }
6412 
6413 qdf_export_symbol(qdf_nbuf_net_timedelta);
6414 #endif
6415