xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision 621c2d72b88fcc9ffdd99576458b4dc42b65a4b2)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf.h
22  * This file provides OS dependent nbuf API's.
23  */
24 
25 #ifndef _I_QDF_NBUF_H
26 #define _I_QDF_NBUF_H
27 
28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/version.h>
33 #include <asm/cacheflush.h>
34 #include <qdf_types.h>
35 #include <qdf_net_types.h>
36 #include <qdf_status.h>
37 #include <qdf_util.h>
38 #include <qdf_mem.h>
39 #include <linux/tcp.h>
40 #include <qdf_util.h>
41 #include <qdf_nbuf_frag.h>
42 #include "qdf_time.h"
43 
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
45 /* Since commit
46  *  baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
47  *
48  * the function netif_rx() can be used in preemptible/thread context as
49  * well as in interrupt context.
50  *
51  * Use netif_rx().
52  */
53 #define netif_rx_ni(skb) netif_rx(skb)
54 #endif
55 
/*
 * Use the Linux socket buffer (struct sk_buff) as the underlying nbuf
 * implementation. Linux uses sk_buff to represent both the packet
 * metadata and its data, so a single sk_buff serves as the complete
 * nbuf.
 */
61 typedef struct sk_buff *__qdf_nbuf_t;
62 
63 /*
64  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
65  *
66  * This is used for skb queue management via linux skb buff head APIs
67  */
68 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
69 
70 /*
71  * typedef __qdf_nbuf_shared_info_t for skb_shinfo linux struct
72  *
73  * This is used for skb shared info via linux skb shinfo APIs
74  */
75 typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;
76 
77 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
78 
79 #define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
80 
81 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
82  * max tx fragments added by the driver
83  * The driver will always add one tx fragment (the tx descriptor)
84  */
85 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
86 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
87 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
88 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
89 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
90 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
91 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
92 #define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
93 #define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8
94 #define QDF_NBUF_CB_PACKET_TYPE_TCP_ACK 9
95 
96 #define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)
97 
98 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
99 #define IEEE80211_RADIOTAP_HE 23
100 #define IEEE80211_RADIOTAP_HE_MU 24
101 #endif
102 
103 #define IEEE80211_RADIOTAP_HE_MU_OTHER 25
104 
105 #define IEEE80211_RADIOTAP_EXT1_USIG	1
106 #define IEEE80211_RADIOTAP_EXT1_EHT	2
107 
108 /* mark the first packet after wow wakeup */
109 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
110 
111 /* TCP Related MASK */
112 #define QDF_NBUF_PKT_TCPOP_FIN			0x01
113 #define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
114 #define QDF_NBUF_PKT_TCPOP_RST			0x04
115 
116 /*
117  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
118  */
119 typedef union {
120 	uint64_t       u64;
121 	qdf_dma_addr_t dma_addr;
122 } qdf_paddr_t;
123 
124 typedef void (*qdf_nbuf_trace_update_t)(char *);
125 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
126 
127 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
128 
129 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
130 	(QDF_NBUF_CB_PADDR(skb) = paddr)
131 
/**
 * __qdf_nbuf_frag_push_head() - record a driver-owned tx fragment in @skb
 * @skb: network buffer
 * @frag_len: length of the extra fragment
 * @frag_vaddr: virtual address of the extra fragment
 * @frag_paddr: physical (DMA) address of the extra fragment
 *
 * Stores a single extra tx fragment in the skb control block and marks
 * the number of extra fragments as 1. All value arguments are
 * parenthesized so expression arguments expand safely.
 */
#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {								\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = (frag_vaddr);	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = (frag_paddr);	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = (frag_len);	\
	} while (0)
140 
/*
 * Tx fragment getters: an index below QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS
 * selects the extra fragment stored in the cb block; any other index
 * falls back to the skb's own data/paddr/len.
 *
 * NOTE: @frag_num is parenthesized (it may be an expression) but is
 * still evaluated once per use site below — avoid side-effecting
 * arguments such as i++.
 */
#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?	\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

/* Unconditionally return the extra-fragment virtual address */
#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

/* Unconditionally return the extra-fragment physical address */
#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?	\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
164 
/**
 * __qdf_nbuf_set_frag_is_wordstream() - set the wordstream flag for a frag
 * @skb: network buffer
 * @frag_num: fragment index; any out-of-range index selects the extra frag
 * @is_wstrm: wordstream flag value to store
 *
 * Fragment index 0 maps to the nbuf itself; any non-zero (including
 * clamped out-of-range) index maps to the extra fragment.
 *
 * Uses a macro-local copy of @frag_num so the caller's variable is no
 * longer clobbered — the previous version assigned back through the
 * macro argument, silently modifying the caller's index.
 */
#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		uint32_t _frag_idx = (frag_num);			\
		if (_frag_idx >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			_frag_idx = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (_frag_idx)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =	\
							   (is_wstrm);	\
		else							\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =	\
							   (is_wstrm);	\
	} while (0)
176 
/*
 * vdev-context and frame-type (ftype) accessors for the skb cb block.
 * Setters use the parenthesized assignment-expression form, matching
 * the other setters in this file.
 */

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	(QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id))

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	(QDF_NBUF_CB_TX_FTYPE((skb)) = (type))

#define __qdf_nbuf_get_tx_ftype(skb) \
	QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	(QDF_NBUF_CB_RX_FTYPE((skb)) = (type))

#define __qdf_nbuf_get_rx_ftype(skb) \
	QDF_NBUF_CB_RX_FTYPE((skb))
201 
/*
 * Rx/Tx cb-block flag accessors.
 *
 * Every setter now parenthesizes its (val) argument so that expression
 * arguments (e.g. comma expressions) expand safely, and all setters use
 * the same parenthesized assignment-expression form — previously the
 * *_chfrag_cont setters alone used a do {} while (0) wrapper.
 */
#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = (val))

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = (val))

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = (val))

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = (val))

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = (val))

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = (val))

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = (val))

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = (val))

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_is_fr_ds_set(skb) \
	(QDF_NBUF_CB_RX_FROM_DS((skb)))

#define __qdf_nbuf_is_to_ds_set(skb) \
	(QDF_NBUF_CB_RX_TO_DS((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = (val))

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = (val))

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = (val))

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = (val))

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = (val))

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

/* Thin wrapper over the kernel's safe skb queue iterator */
#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)
298 
299 /*
300  * prototypes. Implemented in qdf_nbuf.c
301  */
302 
303 /**
304  * __qdf_nbuf_alloc() - Allocate nbuf
305  * @osdev: Device handle
306  * @size: Netbuf requested size
307  * @reserve: headroom to start with
308  * @align: Align
309  * @prio: Priority
310  * @func: Function name of the call site
311  * @line: line number of the call site
312  *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reserve is applied after alignment, an
 * unaligned reserve value will result in an unaligned start address.
316  *
317  * Return: nbuf or %NULL if no memory
318  */
319 __qdf_nbuf_t
320 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
321 		 int prio, const char *func, uint32_t line);
322 
323 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
324 				     const char *func, uint32_t line);
325 
326 #if defined(QCA_DP_NBUF_FAST_PPEDS)
327 /**
328  * __qdf_nbuf_alloc_ppe_ds() - Allocates nbuf
329  * @osdev: Device handle
330  * @size: Netbuf requested size
331  * @func: Function name of the call site
332  * @line: line number of the call site
333  *
334  * This allocates an nbuf for wifi module
335  * in DS mode and uses __netdev_alloc_skb_no_skb_reset API.
336  * The netdev API invokes skb_recycler_alloc with reset_skb
337  * as false. Hence, recycler pool will not do reset_struct
 * when it allocates a DS-used buffer to the DS module, which
 * helps to improve performance.
340  *
341  * Return: nbuf or %NULL if no memory
342  */
343 
344 __qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
345 				     const char *func, uint32_t line);
346 #endif /* QCA_DP_NBUF_FAST_PPEDS */
347 
348 /**
349  * __qdf_nbuf_frag_alloc() - Allocate nbuf in page fragment way.
350  * @osdev: Device handle
351  * @size: Netbuf requested size
352  * @reserve: headroom to start with
353  * @align: Align
354  * @prio: Priority
355  * @func: Function name of the call site
356  * @line: line number of the call site
357  *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reserve is applied after alignment, an
 * unaligned reserve value will result in an unaligned start address.
361  * It will call into kernel page fragment APIs, long time keeping for scattered
362  * allocations should be considered for avoidance.
363  *
364  * Return: nbuf or %NULL if no memory
365  */
366 __qdf_nbuf_t
367 __qdf_nbuf_frag_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
368 		      int prio, const char *func, uint32_t line);
369 
370 /**
371  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
372  * @size: Size to be allocated for skb
373  * @reserve: Reserve headroom size
374  * @align: Align data
375  * @func: Function name of the call site
376  * @line: Line number of the callsite
377  *
378  * This API allocates a nbuf and aligns it if needed and reserves some headroom
379  * space after the alignment where nbuf is not allocated from skb recycler pool.
380  *
381  * Return: Allocated nbuf pointer
382  */
383 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
384 					  const char *func, uint32_t line);
385 
386 /**
387  * __qdf_nbuf_page_frag_alloc() - Allocate nbuf from @pf_cache page
388  *				  fragment cache
389  * @osdev: Device handle
390  * @size: Netbuf requested size
391  * @reserve: headroom to start with
392  * @align: Align
393  * @pf_cache: Reference to page fragment cache
394  * @func: Function name of the call site
395  * @line: line number of the call site
396  *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reserve is applied after alignment, an
 * unaligned reserve value will result in an unaligned start address.
400  *
401  * It will call kernel page fragment APIs for allocation of skb->head, prefer
402  * this API for buffers that are allocated and freed only once i.e., for
403  * reusable buffers.
404  *
405  * Return: nbuf or %NULL if no memory
406  */
407 __qdf_nbuf_t
408 __qdf_nbuf_page_frag_alloc(__qdf_device_t osdev, size_t size, int reserve,
409 			   int align, __qdf_frag_cache_t *pf_cache,
410 			   const char *func, uint32_t line);
411 
412 /**
413  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
414  * @nbuf: Pointer to network buffer
415  *
 * if GFP_ATOMIC is overkill then we can check whether it is
 * called from interrupt context and then do it or else in
418  * normal case use GFP_KERNEL
419  *
420  * example     use "in_irq() || irqs_disabled()"
421  *
422  * Return: cloned skb
423  */
424 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
425 
426 /**
427  * __qdf_nbuf_free() - free the nbuf its interrupt safe
428  * @skb: Pointer to network buffer
429  *
430  * Return: none
431  */
432 void __qdf_nbuf_free(struct sk_buff *skb);
433 
434 /**
435  * __qdf_nbuf_map() - map a buffer to local bus address space
436  * @osdev: OS device
437  * @skb: Pointer to network buffer
438  * @dir: Direction
439  *
440  * Return: QDF_STATUS
441  */
442 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
443 			struct sk_buff *skb, qdf_dma_dir_t dir);
444 
445 /**
446  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
447  * @osdev: OS device
448  * @skb: Pointer to network buffer
449  * @dir: dma direction
450  *
451  * Return: none
452  */
453 void __qdf_nbuf_unmap(__qdf_device_t osdev,
454 			struct sk_buff *skb, qdf_dma_dir_t dir);
455 
456 /**
457  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
458  * @osdev: OS device
459  * @skb: Pointer to network buffer
460  * @dir: Direction
461  *
462  * Return: QDF_STATUS
463  */
464 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
465 				 struct sk_buff *skb, qdf_dma_dir_t dir);
466 
467 /**
468  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
469  * @osdev: OS device
470  * @skb: Pointer to network buffer
471  * @dir: Direction
472  *
473  * Return: none
474  */
475 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
476 			struct sk_buff *skb, qdf_dma_dir_t dir);
477 
478 /**
479  * __qdf_nbuf_reg_trace_cb() - register trace callback
480  * @cb_func_ptr: Pointer to trace callback function
481  *
482  * Return: none
483  */
484 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
485 
486 /**
487  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
488  * @cb_func_ptr: function pointer to the nbuf free callback
489  *
490  * This function registers a callback function for nbuf free.
491  *
492  * Return: none
493  */
494 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
495 
496 /**
497  * __qdf_nbuf_dmamap_create() - create a DMA map.
498  * @osdev: qdf device handle
499  * @dmap: dma map handle
500  *
501  * This can later be used to map networking buffers. They :
502  * - need space in adf_drv's software descriptor
503  * - are typically created during adf_drv_create
504  * - need to be created before any API(qdf_nbuf_map) that uses them
505  *
506  * Return: QDF STATUS
507  */
508 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
509 
510 /**
511  * __qdf_nbuf_dmamap_destroy() - delete a dma map
512  * @osdev: qdf device handle
513  * @dmap: dma map handle
514  *
515  * Return: none
516  */
517 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
518 
519 /**
520  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
521  * @dmap: dma map
522  * @cb: callback
523  * @arg: argument
524  *
525  * Return: none
526  */
527 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
528 
529 /**
530  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
531  * @osdev: os device
532  * @skb: skb handle
533  * @dir: dma direction
534  * @nbytes: number of bytes to be mapped
535  *
536  * Return: QDF_STATUS
537  */
538 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
539 				 qdf_dma_dir_t dir, int nbytes);
540 
541 /**
542  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
543  * @osdev: OS device
544  * @skb: skb handle
545  * @dir: direction
546  * @nbytes: number of bytes
547  *
548  * Return: none
549  */
550 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
551 			     qdf_dma_dir_t dir, int nbytes);
552 
553 /**
554  * __qdf_nbuf_sync_for_cpu() - nbuf sync
555  * @osdev: os device
556  * @skb: sk buff
557  * @dir: direction
558  *
559  * Return: none
560  */
561 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
562 	qdf_dma_dir_t dir);
563 
564 /**
565  * __qdf_nbuf_dma_map_info() - return the dma map info
566  * @bmap: dma map
567  * @sg: dma map info
568  *
569  * Return: none
570  */
571 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
572 
573 /**
574  * __qdf_nbuf_get_frag_size() - get frag size
575  * @nbuf: sk buffer
576  * @cur_frag: current frag
577  *
578  * Return: frag size
579  */
580 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
581 
582 /**
583  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
584  *			specified by the index
585  * @skb: sk buff
586  * @sg: scatter/gather list of all the frags
587  *
588  * Return: none
589  */
590 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
591 
592 /**
593  * __qdf_nbuf_frag_map() - dma map frag
594  * @osdev: os device
595  * @nbuf: sk buff
596  * @offset: offset
597  * @dir: direction
598  * @cur_frag: current fragment
599  *
600  * Return: QDF status
601  */
602 QDF_STATUS __qdf_nbuf_frag_map(
603 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
604 	int offset, qdf_dma_dir_t dir, int cur_frag);
605 
606 /**
607  * qdf_nbuf_classify_pkt() - classify packet
608  * @skb: sk buff
609  *
610  * Return: none
611  */
612 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
613 
614 /**
615  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
616  * @skb: Pointer to network buffer
617  *
618  * This api is for ipv4 packet.
619  *
620  * Return: true if packet is WAPI packet
621  *	   false otherwise.
622  */
623 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
624 
625 /**
626  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
627  * @skb: Pointer to network buffer
628  *
629  * This api is for ipv4 packet.
630  *
631  * Return: true if packet is tdls packet
632  *	   false otherwise.
633  */
634 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
635 
636 /**
637  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
638  * @data: Pointer to network data
639  *
640  * This api is for Tx packets.
641  *
642  * Return: true if packet is ipv4 packet
643  *	   false otherwise
644  */
645 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
646 
647 /**
648  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
649  * @data: Pointer to IPV6 packet data buffer
650  *
651  * This func. checks whether it is a IPV6 packet or not.
652  *
653  * Return: TRUE if it is a IPV6 packet
654  *         FALSE if not
655  */
656 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
657 
658 /**
659  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
660  * @data: Pointer to IPV4 packet data buffer
661  *
662  * This func. checks whether it is a IPV4 multicast packet or not.
663  *
664  * Return: TRUE if it is a IPV4 multicast packet
665  *         FALSE if not
666  */
667 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
668 
669 /**
670  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
671  * @data: Pointer to IPV6 packet data buffer
672  *
673  * This func. checks whether it is a IPV6 multicast packet or not.
674  *
675  * Return: TRUE if it is a IPV6 multicast packet
676  *         FALSE if not
677  */
678 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
679 
680 /**
681  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
682  * @data: Pointer to IPV4 ICMP packet data buffer
683  *
684  * This func. checks whether it is a ICMP packet or not.
685  *
686  * Return: TRUE if it is a ICMP packet
687  *         FALSE if not
688  */
689 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
690 
691 /**
692  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
693  * @data: Pointer to IPV6 ICMPV6 packet data buffer
694  *
695  * This func. checks whether it is a ICMPV6 packet or not.
696  *
697  * Return: TRUE if it is a ICMPV6 packet
698  *         FALSE if not
699  */
700 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
701 
702 /**
703  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
704  * @data: Pointer to IPV4 UDP packet data buffer
705  *
706  * This func. checks whether it is a IPV4 UDP packet or not.
707  *
708  * Return: TRUE if it is a IPV4 UDP packet
709  *         FALSE if not
710  */
711 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
712 
713 /**
714  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
715  * @data: Pointer to IPV4 TCP packet data buffer
716  *
717  * This func. checks whether it is a IPV4 TCP packet or not.
718  *
719  * Return: TRUE if it is a IPV4 TCP packet
720  *         FALSE if not
721  */
722 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
723 
724 /**
725  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
726  * @data: Pointer to IPV6 UDP packet data buffer
727  *
728  * This func. checks whether it is a IPV6 UDP packet or not.
729  *
730  * Return: TRUE if it is a IPV6 UDP packet
731  *         FALSE if not
732  */
733 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
734 
735 /**
736  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
737  * @data: Pointer to IPV6 TCP packet data buffer
738  *
739  * This func. checks whether it is a IPV6 TCP packet or not.
740  *
741  * Return: TRUE if it is a IPV6 TCP packet
742  *         FALSE if not
743  */
744 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
745 
746 /**
747  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
748  * @data: Pointer to network data buffer
749  *
750  * This api is for ipv4 packet.
751  *
752  * Return: true if packet is DHCP packet
753  *	   false otherwise
754  */
755 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
756 
757 /**
758  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
759  * @data: Pointer to network data buffer
760  *
761  * This api is for ipv6 packet.
762  *
763  * Return: true if packet is DHCP packet
764  *	   false otherwise
765  */
766 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
767 
768 /**
769  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
770  * @data: Pointer to network data buffer
771  *
772  * This api is for ipv6 packet.
773  *
774  * Return: true if packet is MDNS packet
775  *	   false otherwise
776  */
777 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
778 
779 /**
780  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
781  * @data: Pointer to network data buffer
782  *
783  * This api is for ipv4 packet.
784  *
785  * Return: true if packet is EAPOL packet
786  *	   false otherwise.
787  */
788 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
789 
790 /**
791  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is a igmp packet
792  * @data: Pointer to network data buffer
793  *
794  * This api is for ipv4 packet.
795  *
796  * Return: true if packet is igmp packet
797  *	   false otherwise.
798  */
799 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
800 
801 /**
802  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is a igmp packet
803  * @data: Pointer to network data buffer
804  *
805  * This api is for ipv6 packet.
806  *
807  * Return: true if packet is igmp packet
808  *	   false otherwise.
809  */
810 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
811 
812 /**
813  * __qdf_nbuf_is_ipv4_igmp_leave_pkt() - check if skb is a igmp leave packet
814  * @buf: Pointer to network buffer
815  *
816  * This api is for ipv4 packet.
817  *
818  * Return: true if packet is igmp packet
819  *	   false otherwise.
820  */
821 bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf);
822 
823 /**
824  * __qdf_nbuf_is_ipv6_igmp_leave_pkt() - check if skb is a igmp leave packet
825  * @buf: Pointer to network buffer
826  *
827  * This api is for ipv6 packet.
828  *
829  * Return: true if packet is igmp packet
830  *	   false otherwise.
831  */
832 bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf);
833 
834 /**
835  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
836  * @data: Pointer to network data buffer
837  *
838  * This api is for ipv4 packet.
839  *
840  * Return: true if packet is ARP packet
841  *	   false otherwise.
842  */
843 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
844 
845 /**
846  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
847  * @nbuf: sk buff
848  *
849  * Return: true if packet is broadcast
850  *	   false otherwise
851  */
852 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
853 
854 /**
855  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
856  * @nbuf: sk buff
857  *
858  * Return: true if packet is multicast replay
859  *	   false otherwise
860  */
861 bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
862 
863 /**
864  * __qdf_nbuf_is_arp_local() - check if local or non local arp
865  * @skb: pointer to sk_buff
866  *
867  * Return: true if local arp or false otherwise.
868  */
869 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
870 
871 /**
872  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
873  * @data: Pointer to network data buffer
874  *
875  * This api is for ipv4 packet.
876  *
877  * Return: true if packet is ARP request
878  *	   false otherwise.
879  */
880 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
881 
882 /**
883  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
884  * @data: Pointer to network data buffer
885  *
886  * This api is for ipv4 packet.
887  *
888  * Return: true if packet is ARP response
889  *	   false otherwise.
890  */
891 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
892 
893 /**
894  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
895  * @data: Pointer to network data buffer
896  *
897  * This api is for ipv4 packet.
898  *
899  * Return: ARP packet source IP value.
900  */
901 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
902 
903 /**
904  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
905  * @data: Pointer to network data buffer
906  *
907  * This api is for ipv4 packet.
908  *
909  * Return: ARP packet target IP value.
910  */
911 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
912 
913 /**
914  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
915  * @data: Pointer to network data buffer
916  * @len: length to copy
917  *
918  * This api is for dns domain name
919  *
920  * Return: dns domain name.
921  */
922 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
923 
924 /**
925  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
926  * @data: Pointer to network data buffer
927  *
928  * This api is for dns query packet.
929  *
930  * Return: true if packet is dns query packet.
931  *	   false otherwise.
932  */
933 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
934 
935 /**
936  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
937  * @data: Pointer to network data buffer
938  *
939  * This api is for dns query response.
940  *
941  * Return: true if packet is dns response packet.
942  *	   false otherwise.
943  */
944 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
945 
946 /**
947  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
948  * @data: Pointer to network data buffer
949  *
950  * This api is to check if the packet is tcp fin.
951  *
952  * Return: true if packet is tcp fin packet.
953  *         false otherwise.
954  */
955 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
956 
957 /**
958  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
959  * @data: Pointer to network data buffer
960  *
961  * This api is to check if the tcp packet is fin ack.
962  *
963  * Return: true if packet is tcp fin ack packet.
964  *         false otherwise.
965  */
966 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
967 
968 /**
969  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
970  * @data: Pointer to network data buffer
971  *
972  * This api is for tcp syn packet.
973  *
974  * Return: true if packet is tcp syn packet.
975  *	   false otherwise.
976  */
977 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
978 
979 /**
980  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
981  * @data: Pointer to network data buffer
982  *
983  * This api is for tcp syn ack packet.
984  *
985  * Return: true if packet is tcp syn ack packet.
986  *	   false otherwise.
987  */
988 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
989 
990 /**
991  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
992  * @data: Pointer to network data buffer
993  *
994  * This api is to check if the tcp packet is rst.
995  *
996  * Return: true if packet is tcp rst packet.
997  *         false otherwise.
998  */
999 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
1000 
1001 /**
1002  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1003  * @data: Pointer to network data buffer
1004  *
1005  * This api is for tcp ack packet.
1006  *
1007  * Return: true if packet is tcp ack packet.
1008  *	   false otherwise.
1009  */
1010 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
1011 
1012 /**
1013  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1014  * @data: Pointer to network data buffer
1015  *
1016  * This api is for tcp packet.
1017  *
1018  * Return: tcp source port value.
1019  */
1020 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
1021 
1022 /**
1023  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1024  * @data: Pointer to network data buffer
1025  *
1026  * This api is for tcp packet.
1027  *
1028  * Return: tcp destination port value.
1029  */
1030 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
1031 
1032 /**
1033  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1034  * @data: Pointer to network data buffer
1035  *
 * This api is for icmpv4 request packet.
1037  *
1038  * Return: true if packet is icmpv4 request
1039  *	   false otherwise.
1040  */
1041 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
1042 
1043 /**
1044  * __qdf_nbuf_data_is_icmpv4_redirect() - check if skb data is a icmpv4 redirect
1045  * @data: Pointer to network data buffer
1046  *
 * This api is for icmpv4 redirect packet.
1048  *
1049  * Return: true if packet is icmpv4 redirect
1050  *	   false otherwise.
1051  */
1052 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
1053 
1054 /**
1055  * __qdf_nbuf_data_is_icmpv6_redirect() - check if skb data is a icmpv6 redirect
1056  * @data: Pointer to network data buffer
1057  *
 * This api is for icmpv6 redirect packet.
1059  *
1060  * Return: true if packet is icmpv6 redirect
1061  *	   false otherwise.
1062  */
1063 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
1064 
1065 /**
1066  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1067  * @data: Pointer to network data buffer
1068  *
 * This api is for icmpv4 response packet.
1070  *
1071  * Return: true if packet is icmpv4 response
1072  *	   false otherwise.
1073  */
1074 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
1075 
1076 /**
1077  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1078  * @data: Pointer to network data buffer
1079  *
1080  * This api is for ipv4 packet.
1081  *
1082  * Return: icmpv4 packet source IP value.
1083  */
1084 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
1085 
1086 /**
1087  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1088  * @data: Pointer to network data buffer
1089  *
1090  * This api is for ipv4 packet.
1091  *
1092  * Return: icmpv4 packet target IP value.
1093  */
1094 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
1095 
1096 /**
1097  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1098  *              of DHCP packet.
1099  * @data: Pointer to DHCP packet data buffer
1100  *
1101  * This func. returns the subtype of DHCP packet.
1102  *
1103  * Return: subtype of the DHCP packet.
1104  */
1105 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
1106 
1107 /**
1108  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype of EAPOL packet.
1109  * @data: Pointer to EAPOL packet data buffer
1110  *
1111  * This func. returns the subtype of EAPOL packet.
1112  *
1113  * Return: subtype of the EAPOL packet.
1114  */
1115 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
1116 
1117 /**
1118  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1119  *            of ARP packet.
1120  * @data: Pointer to ARP packet data buffer
1121  *
1122  * This func. returns the subtype of ARP packet.
1123  *
1124  * Return: subtype of the ARP packet.
1125  */
1126 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
1127 
1128 /**
1129  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1130  *            of IPV4 ICMP packet.
1131  * @data: Pointer to IPV4 ICMP packet data buffer
1132  *
1133  * This func. returns the subtype of ICMP packet.
1134  *
1135  * Return: subtype of the ICMP packet.
1136  */
1137 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
1138 
1139 /**
1140  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1141  *            of IPV6 ICMPV6 packet.
1142  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1143  *
1144  * This func. returns the subtype of ICMPV6 packet.
1145  *
1146  * Return: subtype of the ICMPV6 packet.
1147  */
1148 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
1149 
1150 /**
1151  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1152  *            of IPV4 packet.
1153  * @data: Pointer to IPV4 packet data buffer
1154  *
1155  * This func. returns the proto type of IPV4 packet.
1156  *
1157  * Return: proto type of IPV4 packet.
1158  */
1159 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
1160 
1161 /**
1162  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1163  *            of IPV6 packet.
1164  * @data: Pointer to IPV6 packet data buffer
1165  *
1166  * This func. returns the proto type of IPV6 packet.
1167  *
1168  * Return: proto type of IPV6 packet.
1169  */
1170 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
1171 
1172 /**
1173  * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of IPv4 packet
1174  * @data: Pointer to skb payload
1175  *
1176  * This func. returns the TOS type of IPv4 packet.
1177  *
1178  * Return: TOS type of IPv4 packet.
1179  */
1180 uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
1181 
1182 /**
1183  * __qdf_nbuf_data_get_ipv6_tc() - get the TC field
1184  *                                 of IPv6 packet.
1185  * @data: Pointer to IPv6 packet data buffer
1186  *
1187  * This func. returns the TC field of IPv6 packet.
1188  *
1189  * Return: traffic classification of IPv6 packet.
1190  */
1191 uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
1192 
1193 /**
1194  * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for IPv4 packet
1195  * @data: pointer to skb payload
1196  * @tos: value of TOS to be set
1197  *
1198  * This func. set the TOS field of IPv4 packet.
1199  *
1200  * Return: None
1201  */
1202 void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
1203 
1204 /**
1205  * __qdf_nbuf_data_set_ipv6_tc() - set the TC field
1206  *                                 of IPv6 packet.
1207  * @data: Pointer to skb payload
1208  * @tc: value to set to IPv6 header TC field
1209  *
1210  * This func. set the TC field of IPv6 header.
1211  *
1212  * Return: None
1213  */
1214 void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
1215 
1216 /**
1217  * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
1218  * @skb: Buffer
1219  *
1220  * This function checks IPv4 packet is last fragment or not.
1221  * Caller has to call this function for IPv4 packets only.
1222  *
1223  * Return: True if IPv4 packet is last fragment otherwise false
1224  */
1225 bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);
1226 
1227 bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb);
1228 
1229 #ifdef QDF_NBUF_GLOBAL_COUNT
1230 /**
1231  * __qdf_nbuf_count_get() - get nbuf global count
1232  *
1233  * Return: nbuf global count
1234  */
1235 int __qdf_nbuf_count_get(void);
1236 
1237 /**
1238  * __qdf_nbuf_count_inc() - increment nbuf global count
1239  *
1240  * @nbuf: sk buff
1241  *
1242  * Return: void
1243  */
1244 void __qdf_nbuf_count_inc(struct sk_buff *nbuf);
1245 
1246 /**
1247  * __qdf_nbuf_count_dec() - decrement nbuf global count
1248  *
1249  * @nbuf: sk buff
1250  *
1251  * Return: void
1252  */
1253 void __qdf_nbuf_count_dec(struct sk_buff *nbuf);
1254 
1255 /**
1256  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
1257  *
 * Return: void
1259  */
1260 void __qdf_nbuf_mod_init(void);
1261 
1262 /**
1263  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nbuf
1264  *
1265  * Return void
1266  */
1267 void __qdf_nbuf_mod_exit(void);
1268 
1269 #else
1270 
/* Stub: global nbuf accounting is compiled out (QDF_NBUF_GLOBAL_COUNT off) */
static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}
1275 
/* No-op stub: global nbuf count tracking is compiled out */
static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
	return;
}
1280 
/* No-op stub: global nbuf count tracking is compiled out */
static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
	return;
}
1285 
/* No-op stub: nothing to initialize when global count is compiled out */
static inline void __qdf_nbuf_mod_init(void)
{
	return;
}
1290 
/* No-op stub: nothing to tear down when global count is compiled out */
static inline void __qdf_nbuf_mod_exit(void)
{
	return;
}
1295 #endif
1296 
1297 /**
1298  * __qdf_to_status() - OS to QDF status conversion
1299  * @error : OS error
1300  *
1301  * Return: QDF status
1302  */
1303 static inline QDF_STATUS __qdf_to_status(signed int error)
1304 {
1305 	switch (error) {
1306 	case 0:
1307 		return QDF_STATUS_SUCCESS;
1308 	case ENOMEM:
1309 	case -ENOMEM:
1310 		return QDF_STATUS_E_NOMEM;
1311 	default:
1312 		return QDF_STATUS_E_NOSUPPORT;
1313 	}
1314 }
1315 
1316 /**
1317  * __qdf_nbuf_cat() - link two nbufs
1318  * @dst: Buffer to piggyback into
1319  * @src: Buffer to put
1320  *
1321  * Concat two nbufs, the new buf(src) is piggybacked into the older one.
1322  * It is callers responsibility to free the src skb.
1323  *
1324  * Return: QDF_STATUS (status of the call) if failed the src skb
1325  *         is released
1326  */
1327 static inline QDF_STATUS
1328 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
1329 {
1330 	QDF_STATUS error = 0;
1331 
1332 	qdf_assert(dst && src);
1333 
1334 	/*
1335 	 * Since pskb_expand_head unconditionally reallocates the skb->head
1336 	 * buffer, first check whether the current buffer is already large
1337 	 * enough.
1338 	 */
1339 	if (skb_tailroom(dst) < src->len) {
1340 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
1341 		if (error)
1342 			return __qdf_to_status(error);
1343 	}
1344 
1345 	memcpy(skb_tail_pointer(dst), src->data, src->len);
1346 	skb_put(dst, src->len);
1347 	return __qdf_to_status(error);
1348 }
1349 
1350 /*
1351  * nbuf manipulation routines
1352  */
1353 /**
1354  * __qdf_nbuf_headroom() - return the amount of tail space available
1355  * @skb: Pointer to network buffer
1356  *
1357  * Return: amount of tail room
1358  */
1359 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
1360 {
1361 	return skb_headroom(skb);
1362 }
1363 
1364 /**
1365  * __qdf_nbuf_tailroom() - return the amount of tail space available
1366  * @skb: Pointer to network buffer
1367  *
1368  * Return: amount of tail room
1369  */
1370 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
1371 {
1372 	return skb_tailroom(skb);
1373 }
1374 
1375 /**
1376  * __qdf_nbuf_put_tail() - Puts data in the end
1377  * @skb: Pointer to network buffer
1378  * @size: size to be pushed
1379  *
1380  * Return: data pointer of this buf where new data has to be
1381  *         put, or NULL if there is not enough room in this buf.
1382  */
1383 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
1384 {
1385 	if (skb_tailroom(skb) < size) {
1386 		if (unlikely(pskb_expand_head(skb, 0,
1387 			size - skb_tailroom(skb), GFP_ATOMIC))) {
1388 			__qdf_nbuf_count_dec(skb);
1389 			dev_kfree_skb_any(skb);
1390 			return NULL;
1391 		}
1392 	}
1393 	return skb_put(skb, size);
1394 }
1395 
1396 /**
1397  * __qdf_nbuf_trim_tail() - trim data out from the end
1398  * @skb: Pointer to network buffer
1399  * @size: size to be popped
1400  *
1401  * Return: none
1402  */
1403 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
1404 {
1405 	return skb_trim(skb, skb->len - size);
1406 }
1407 
1408 
1409 /*
1410  * prototypes. Implemented in qdf_nbuf.c
1411  */
1412 
1413 /**
1414  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1415  * @skb: Pointer to network buffer
1416  *
1417  * Return: TX checksum value
1418  */
1419 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
1420 
1421 /**
1422  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1423  * @skb: Pointer to network buffer
1424  * @cksum: Pointer to checksum value
1425  *
1426  * Return: QDF_STATUS
1427  */
1428 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
1429 				   qdf_nbuf_rx_cksum_t *cksum);
1430 
1431 /**
1432  * __qdf_nbuf_get_tid() - get tid
1433  * @skb: Pointer to network buffer
1434  *
1435  * Return: tid
1436  */
1437 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
1438 
1439 /**
1440  * __qdf_nbuf_set_tid() - set tid
1441  * @skb: Pointer to network buffer
1442  * @tid: TID value to set
1443  *
1444  * Return: none
1445  */
1446 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
1447 
1448 /**
1449  * __qdf_nbuf_get_exemption_type() - get exemption type
1450  * @skb: Pointer to network buffer
1451  *
1452  * Return: exemption type
1453  */
1454 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
1455 
1456 /**
1457  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
1458  * @skb: sk_buff handle
1459  *
1460  * Return: none
1461  */
1462 
1463 void __qdf_nbuf_ref(struct sk_buff *skb);
1464 
1465 /**
1466  * __qdf_nbuf_shared() - Check whether the buffer is shared
1467  *  @skb: sk_buff buffer
1468  *
1469  *  Return: true if more than one person has a reference to this buffer.
1470  */
1471 int __qdf_nbuf_shared(struct sk_buff *skb);
1472 
1473 /**
1474  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb,
1475  * @skb: sk buff
1476  *
1477  * Return: number of fragments
1478  */
1479 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1480 {
1481 	return skb_shinfo(skb)->nr_frags;
1482 }
1483 
1484 /**
1485  * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
1486  * @skb: sk buff
1487  *
1488  * This API returns a total number of fragments from the fraglist
1489  * Return: total number of fragments
1490  */
1491 static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
1492 {
1493 	uint32_t num_frag = 0;
1494 	struct sk_buff *list = NULL;
1495 
1496 	num_frag = skb_shinfo(skb)->nr_frags;
1497 	skb_walk_frags(skb, list)
1498 		num_frag += skb_shinfo(list)->nr_frags;
1499 
1500 	return num_frag;
1501 }
1502 
1503 /*
1504  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1505  */
1506 #define __qdf_nbuf_pool_delete(osdev)
1507 
1508 /**
1509  * __qdf_nbuf_copy() - returns a private copy of the skb
1510  * @skb: Pointer to network buffer
1511  *
1512  * This API returns a private copy of the skb, the skb returned is completely
1513  *  modifiable by callers
1514  *
1515  * Return: skb or NULL
1516  */
1517 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1518 {
1519 	struct sk_buff *skb_new = NULL;
1520 
1521 	skb_new = skb_copy(skb, GFP_ATOMIC);
1522 	if (skb_new) {
1523 		__qdf_nbuf_count_inc(skb_new);
1524 	}
1525 	return skb_new;
1526 }
1527 
1528 #define __qdf_nbuf_reserve      skb_reserve
1529 
1530 /**
1531  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1532  * @skb: Pointer to network buffer
1533  * @data: data pointer
1534  *
1535  * Return: none
1536  */
1537 static inline void
1538 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1539 {
1540 	skb->data = data;
1541 }
1542 
1543 /**
1544  * __qdf_nbuf_set_len() - set buffer data length
1545  * @skb: Pointer to network buffer
1546  * @len: data length
1547  *
1548  * Return: none
1549  */
1550 static inline void
1551 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1552 {
1553 	skb->len = len;
1554 }
1555 
1556 /**
1557  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1558  * @skb: Pointer to network buffer
1559  * @len: skb data length
1560  *
1561  * Return: none
1562  */
1563 static inline void
1564 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1565 {
1566 	skb_set_tail_pointer(skb, len);
1567 }
1568 
1569 /**
1570  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1571  * @skb: Pointer to network buffer
1572  * @list: list to use
1573  *
1574  * This is a lockless version, driver must acquire locks if it
1575  * needs to synchronize
1576  *
1577  * Return: none
1578  */
1579 static inline void
1580 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1581 {
1582 	__skb_unlink(skb, list);
1583 }
1584 
1585 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1586 /**
1587  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
1588  *                                         buffer in kernel
1589  *
1590  * Return: true if dev_scratch is supported
1591  *         false if dev_scratch is not supported
1592  */
1593 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1594 {
1595 	return true;
1596 }
1597 
1598 /**
1599  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1600  * @skb: Pointer to network buffer
1601  *
1602  * Return: dev_scratch if dev_scratch supported
1603  *         0 if dev_scratch not supported
1604  */
1605 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1606 {
1607 	return skb->dev_scratch;
1608 }
1609 
1610 /**
1611  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1612  * @skb: Pointer to network buffer
1613  * @value: value to be set in dev_scratch of network buffer
1614  *
1615  * Return: void
1616  */
1617 static inline void
1618 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1619 {
1620 	skb->dev_scratch = value;
1621 }
1622 #else
/* dev_scratch is unavailable before kernel 4.14 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}
1627 
/* Stub for kernels < 4.14: always reads back 0 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}
1632 
/* Stub for kernels < 4.14: value is silently discarded */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
1637 #endif /* KERNEL_VERSION(4, 14, 0) */
1638 
1639 /**
1640  * __qdf_nbuf_head() - return the pointer the skb's head pointer
1641  * @skb: Pointer to network buffer
1642  *
1643  * Return: Pointer to head buffer
1644  */
1645 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1646 {
1647 	return skb->head;
1648 }
1649 
1650 /**
1651  * __qdf_nbuf_data() - return the pointer to data header in the skb
1652  * @skb: Pointer to network buffer
1653  *
1654  * Return: Pointer to skb data
1655  */
1656 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1657 {
1658 	return skb->data;
1659 }
1660 
/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of the skb->data field itself (&skb->data), cast to
 *         uint8_t *; NOT the data pointer's value
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}
1665 
1666 /**
1667  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1668  * @skb: Pointer to network buffer
1669  *
1670  * Return: skb protocol
1671  */
1672 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1673 {
1674 	return skb->protocol;
1675 }
1676 
1677 /**
1678  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1679  * @skb: Pointer to network buffer
1680  *
1681  * Return: skb ip_summed
1682  */
1683 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1684 {
1685 	return skb->ip_summed;
1686 }
1687 
1688 /**
1689  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1690  * @skb: Pointer to network buffer
1691  * @ip_summed: ip checksum
1692  *
1693  * Return: none
1694  */
1695 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1696 		 uint8_t ip_summed)
1697 {
1698 	skb->ip_summed = ip_summed;
1699 }
1700 
1701 /**
1702  * __qdf_nbuf_get_priority() - return the priority value of the skb
1703  * @skb: Pointer to network buffer
1704  *
1705  * Return: skb priority
1706  */
1707 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1708 {
1709 	return skb->priority;
1710 }
1711 
1712 /**
1713  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1714  * @skb: Pointer to network buffer
1715  * @p: priority
1716  *
1717  * Return: none
1718  */
1719 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1720 {
1721 	skb->priority = p;
1722 }
1723 
1724 /**
1725  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1726  * @skb: Current skb
1727  * @skb_next: Next skb
1728  *
1729  * Return: void
1730  */
1731 static inline void
1732 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1733 {
1734 	skb->next = skb_next;
1735 }
1736 
1737 /**
1738  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1739  * @skb: Current skb
1740  *
1741  * Return: the next skb pointed to by the current skb
1742  */
1743 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1744 {
1745 	return skb->next;
1746 }
1747 
1748 /**
1749  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1750  * @skb: Current skb
1751  * @skb_next: Next skb
1752  *
1753  * This fn is used to link up extensions to the head skb. Does not handle
1754  * linking to the head
1755  *
1756  * Return: none
1757  */
1758 static inline void
1759 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1760 {
1761 	skb->next = skb_next;
1762 }
1763 
1764 /**
1765  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1766  * @skb: Current skb
1767  *
1768  * Return: the next skb pointed to by the current skb
1769  */
1770 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1771 {
1772 	return skb->next;
1773 }
1774 
1775 /**
1776  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1777  * @skb_head: head_buf nbuf holding head segment (single)
1778  * @ext_list: nbuf list holding linked extensions to the head
1779  * @ext_len: Total length of all buffers in the extension list
1780  *
1781  * This function is used to link up a list of packet extensions (seg1, 2,*  ...)
1782  * to the nbuf holding the head segment (seg0)
1783  *
1784  * Return: none
1785  */
1786 static inline void
1787 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1788 			struct sk_buff *ext_list, size_t ext_len)
1789 {
1790 	skb_shinfo(skb_head)->frag_list = ext_list;
1791 	skb_head->data_len += ext_len;
1792 	skb_head->len += ext_len;
1793 }
1794 
1795 /**
1796  * __qdf_nbuf_get_shinfo() - return the shared info of the skb
1797  * @head_buf: Pointer to network buffer
1798  *
1799  * Return: skb shared info from head buf
1800  */
1801 static inline
1802 struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
1803 {
1804 	return skb_shinfo(head_buf);
1805 }
1806 
1807 /**
1808  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1809  * @head_buf: Network buf holding head segment (single)
1810  *
1811  * This ext_list is populated when we have Jumbo packet, for example in case of
1812  * monitor mode amsdu packet reception, and are stiched using frags_list.
1813  *
1814  * Return: Network buf list holding linked extensions from head buf.
1815  */
1816 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1817 {
1818 	return (skb_shinfo(head_buf)->frag_list);
1819 }
1820 
1821 /**
1822  * __qdf_nbuf_get_age() - return the checksum value of the skb
1823  * @skb: Pointer to network buffer
1824  *
1825  * Return: checksum value
1826  */
1827 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1828 {
1829 	return skb->csum;
1830 }
1831 
1832 /**
1833  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1834  * @skb: Pointer to network buffer
1835  * @v: Value
1836  *
1837  * Return: none
1838  */
1839 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1840 {
1841 	skb->csum = v;
1842 }
1843 
1844 /**
1845  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1846  * @skb: Pointer to network buffer
1847  * @adj: Adjustment value
1848  *
1849  * Return: none
1850  */
1851 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1852 {
1853 	skb->csum -= adj;
1854 }
1855 
1856 /**
1857  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1858  * @skb: Pointer to network buffer
1859  * @offset: Offset value
1860  * @len: Length
1861  * @to: Destination pointer
1862  *
1863  * Return: length of the copy bits for skb
1864  */
1865 static inline int32_t
1866 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1867 {
1868 	return skb_copy_bits(skb, offset, to, len);
1869 }
1870 
1871 /**
1872  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1873  * @skb: Pointer to network buffer
1874  * @len:  Packet length
1875  *
1876  * Return: none
1877  */
1878 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1879 {
1880 	if (skb->len > len) {
1881 		skb_trim(skb, len);
1882 	} else {
1883 		if (skb_tailroom(skb) < len - skb->len) {
1884 			if (unlikely(pskb_expand_head(skb, 0,
1885 				len - skb->len - skb_tailroom(skb),
1886 				GFP_ATOMIC))) {
1887 				QDF_DEBUG_PANIC(
1888 				   "SKB tailroom is lessthan requested length."
1889 				   " tail-room: %u, len: %u, skb->len: %u",
1890 				   skb_tailroom(skb), len, skb->len);
1891 				__qdf_nbuf_count_dec(skb);
1892 				dev_kfree_skb_any(skb);
1893 			}
1894 		}
1895 		skb_put(skb, (len - skb->len));
1896 	}
1897 }
1898 
1899 /**
1900  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1901  * @skb: Pointer to network buffer
1902  * @protocol: Protocol type
1903  *
1904  * Return: none
1905  */
1906 static inline void
1907 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1908 {
1909 	skb->protocol = protocol;
1910 }
1911 
1912 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1913 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1914 
1915 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1916 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1917 
1918 /**
1919  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
1920  * @dmaaddr: DMA address
1921  * @lo: low 32-bits of @dmaaddr
1922  * @hi: high 32-bits of @dmaaddr
1923  *
1924  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
1925  *
1926  * Return: N/A
1927  */
1928 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1929 				      uint32_t *lo, uint32_t *hi);
1930 
1931 /**
1932  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
1933  * into segments
1934  * @osdev: qdf device handle
1935  * @skb: network buffer to be segmented
1936  * @tso_info: This is the output. The information about the
1937  *           TSO segments will be populated within this.
1938  *
1939  * This function fragments a TCP jumbo packet into smaller
1940  * segments to be transmitted by the driver. It chains the TSO
1941  * segments created into a list.
1942  *
1943  * Return: number of TSO segments
1944  */
1945 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1946 				 struct qdf_tso_info_t *tso_info);
1947 
1948 /**
1949  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
1950  *
1951  * @osdev: qdf device handle
1952  * @tso_seg: TSO segment element to be unmapped
1953  * @is_last_seg: whether this is last tso seg or not
1954  *
1955  * Return: none
1956  */
1957 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1958 			  struct qdf_tso_seg_elem_t *tso_seg,
1959 			  bool is_last_seg);
1960 
1961 #ifdef FEATURE_TSO
1962 /**
1963  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1964  *                                    payload len
1965  * @skb: buffer
1966  *
1967  * Return: size
1968  */
1969 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1970 
1971 /**
1972  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
1973  *                                into segments
1974  * @skb:   network buffer to be segmented
1975  *
1976  * This function fragments a TCP jumbo packet into smaller
1977  * segments to be transmitted by the driver. It chains the TSO
1978  * segments created into a list.
1979  *
1980  * Return: number of segments
1981  */
1982 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1983 
1984 #else
/* Stub when FEATURE_TSO is disabled: report zero TCP payload length */
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}
1990 
/* Stub when FEATURE_TSO is disabled: no TSO segments */
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}
1995 
1996 #endif /* FEATURE_TSO */
1997 
1998 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1999 {
2000 	if (skb_is_gso(skb) &&
2001 		(skb_is_gso_v6(skb) ||
2002 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
2003 		return true;
2004 	else
2005 		return false;
2006 }
2007 
2008 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
2009 
2010 int __qdf_nbuf_get_users(struct sk_buff *skb);
2011 
2012 /**
2013  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
2014  *			      and get hw_classify by peeking
2015  *			      into packet
2016  * @skb:		Network buffer (skb on Linux)
2017  * @pkt_type:		Pkt type (from enum htt_pkt_type)
2018  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
2019  *			needs to be set in case of CE classification support
2020  *			Is set by this macro.
2021  * @hw_classify:	This is a flag which is set to indicate
2022  *			CE classification is enabled.
2023  *			Do not set this bit for VLAN packets
2024  *			OR for mcast / bcast frames.
2025  *
2026  * This macro parses the payload to figure out relevant Tx meta-data e.g.
2027  * whether to enable tx_classify bit in CE.
2028  *
2029  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
2030  * If protocol is less than ETH_P_802_3_MIN (0x600), then
2031  * it is the length and a 802.3 frame else it is Ethernet Type II
2032  * (RFC 894).
2033  * Bit 4 in pkt_subtype is the tx_classify bit
2034  *
2035  * Return:	void
2036  */
2037 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
2038 				pkt_subtype, hw_classify)	\
2039 do {								\
2040 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
2041 	uint16_t ether_type = ntohs(eh->h_proto);		\
2042 	bool is_mc_bc;						\
2043 								\
2044 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
2045 		   is_multicast_ether_addr((uint8_t *)eh);	\
2046 								\
2047 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
2048 		hw_classify = 1;				\
2049 		pkt_subtype = 0x01 <<				\
2050 			HTT_TX_CLASSIFY_BIT_S;			\
2051 	}							\
2052 								\
2053 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
2054 		pkt_type = htt_pkt_type_ethernet;		\
2055 								\
2056 } while (0)
2057 
2058 /*
2059  * nbuf private buffer routines
2060  */
2061 
2062 /**
2063  * __qdf_nbuf_peek_header() - return the header's addr & m_len
2064  * @skb: Pointer to network buffer
2065  * @addr: Pointer to store header's addr
2066  * @len: network buffer length
2067  *
2068  * Return: none
2069  */
2070 static inline void
2071 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
2072 {
2073 	*addr = skb->data;
2074 	*len = skb->len;
2075 }
2076 
2077 /**
2078  * typedef __qdf_nbuf_queue_t -  network buffer queue
2079  * @head: Head pointer
2080  * @tail: Tail pointer
2081  * @qlen: Queue length
2082  */
2083 typedef struct __qdf_nbuf_qhead {
2084 	struct sk_buff *head;
2085 	struct sk_buff *tail;
2086 	unsigned int qlen;
2087 } __qdf_nbuf_queue_t;
2088 
2089 /******************Functions *************/
2090 
2091 /**
2092  * __qdf_nbuf_queue_init() - initiallize the queue head
2093  * @qhead: Queue head
2094  *
2095  * Return: QDF status
2096  */
2097 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
2098 {
2099 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
2100 	return QDF_STATUS_SUCCESS;
2101 }
2102 
2103 /**
2104  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
2105  * @qhead: Queue head
2106  * @skb: Pointer to network buffer
2107  *
2108  * This is a lockless version, driver must acquire locks if it
2109  * needs to synchronize
2110  *
2111  * Return: none
2112  */
2113 static inline void
2114 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
2115 {
2116 	skb->next = NULL;       /*Nullify the next ptr */
2117 
2118 	if (!qhead->head)
2119 		qhead->head = skb;
2120 	else
2121 		qhead->tail->next = skb;
2122 
2123 	qhead->tail = skb;
2124 	qhead->qlen++;
2125 }
2126 
2127 /**
2128  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
2129  * @dest: target netbuf queue
2130  * @src:  source netbuf queue
2131  *
2132  * Return: target netbuf queue
2133  */
2134 static inline __qdf_nbuf_queue_t *
2135 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
2136 {
2137 	if (!dest)
2138 		return NULL;
2139 	else if (!src || !(src->head))
2140 		return dest;
2141 
2142 	if (!(dest->head))
2143 		dest->head = src->head;
2144 	else
2145 		dest->tail->next = src->head;
2146 
2147 	dest->tail = src->tail;
2148 	dest->qlen += src->qlen;
2149 	return dest;
2150 }
2151 
2152 /**
2153  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
2154  * @qhead: Queue head
2155  * @skb: Pointer to network buffer
2156  *
2157  * This is a lockless version, driver must acquire locks if it needs to
2158  * synchronize
2159  *
2160  * Return: none
2161  */
2162 static inline void
2163 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
2164 {
2165 	if (!qhead->head) {
2166 		/*Empty queue Tail pointer Must be updated */
2167 		qhead->tail = skb;
2168 	}
2169 	skb->next = qhead->head;
2170 	qhead->head = skb;
2171 	qhead->qlen++;
2172 }
2173 
2174 /**
2175  * __qdf_nbuf_queue_remove_last() - remove a skb from the tail of the queue
2176  * @qhead: Queue head
2177  *
2178  * This is a lockless version. Driver should take care of the locks
2179  *
2180  * Return: skb or NULL
2181  */
2182 static inline struct sk_buff *
2183 __qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
2184 {
2185 	__qdf_nbuf_t tmp_tail, node = NULL;
2186 
2187 	if (qhead->head) {
2188 		qhead->qlen--;
2189 		tmp_tail = qhead->tail;
2190 		node = qhead->head;
2191 		if (qhead->head == qhead->tail) {
2192 			qhead->head = NULL;
2193 			qhead->tail = NULL;
2194 			return node;
2195 		} else {
2196 			while (tmp_tail != node->next)
2197 			       node = node->next;
2198 			qhead->tail = node;
2199 			return node->next;
2200 		}
2201 	}
2202 	return node;
2203 }
2204 
2205 /**
2206  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
2207  * @qhead: Queue head
2208  *
2209  * This is a lockless version. Driver should take care of the locks
2210  *
2211  * Return: skb or NULL
2212  */
2213 static inline
2214 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
2215 {
2216 	__qdf_nbuf_t tmp = NULL;
2217 
2218 	if (qhead->head) {
2219 		qhead->qlen--;
2220 		tmp = qhead->head;
2221 		if (qhead->head == qhead->tail) {
2222 			qhead->head = NULL;
2223 			qhead->tail = NULL;
2224 		} else {
2225 			qhead->head = tmp->next;
2226 		}
2227 		tmp->next = NULL;
2228 	}
2229 	return tmp;
2230 }
2231 
2232 /**
2233  * __qdf_nbuf_queue_first() - returns the first skb in the queue
2234  * @qhead: head of queue
2235  *
2236  * Return: NULL if the queue is empty
2237  */
2238 static inline struct sk_buff *
2239 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
2240 {
2241 	return qhead->head;
2242 }
2243 
2244 /**
2245  * __qdf_nbuf_queue_last() - returns the last skb in the queue
2246  * @qhead: head of queue
2247  *
2248  * Return: NULL if the queue is empty
2249  */
2250 static inline struct sk_buff *
2251 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
2252 {
2253 	return qhead->tail;
2254 }
2255 
2256 /**
2257  * __qdf_nbuf_queue_len() - return the queue length
2258  * @qhead: Queue head
2259  *
2260  * Return: Queue length
2261  */
2262 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
2263 {
2264 	return qhead->qlen;
2265 }
2266 
2267 /**
2268  * __qdf_nbuf_queue_next() - return the next skb from packet chain
2269  * @skb: Pointer to network buffer
2270  *
2271  * This API returns the next skb from packet chain, remember the skb is
2272  * still in the queue
2273  *
2274  * Return: NULL if no packets are there
2275  */
2276 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
2277 {
2278 	return skb->next;
2279 }
2280 
2281 /**
2282  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
2283  * @qhead: Queue head
2284  *
2285  * Return: true if length is 0 else false
2286  */
2287 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
2288 {
2289 	return qhead->qlen == 0;
2290 }
2291 
2292 /*
2293  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
2294  * Because the queue head will most likely put in some structure,
2295  * we don't use pointer type as the definition.
2296  */
2297 
2298 /*
2299  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
2300  * Because the queue head will most likely put in some structure,
2301  * we don't use pointer type as the definition.
2302  */
2303 
/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag (no-op)
 * @skb: Pointer to network buffer (unused)
 * @flag: flag value (unused)
 *
 * Intentionally empty on Linux; exists only to satisfy the
 * OS-abstraction API.
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
2308 
2309 /**
2310  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
2311  *        expands the headroom
2312  *        in the data region. In case of failure the skb is released.
2313  * @skb: sk buff
2314  * @headroom: size of headroom
2315  *
2316  * Return: skb or NULL
2317  */
2318 static inline struct sk_buff *
2319 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
2320 {
2321 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
2322 		__qdf_nbuf_count_dec(skb);
2323 		dev_kfree_skb_any(skb);
2324 		skb = NULL;
2325 	}
2326 	return skb;
2327 }
2328 
2329 /**
2330  * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
2331  *        exapnds the tailroom
2332  *        in data region. In case of failure it releases the skb.
2333  * @skb: sk buff
2334  * @tailroom: size of tailroom
2335  *
2336  * Return: skb or NULL
2337  */
2338 static inline struct sk_buff *
2339 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
2340 {
2341 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
2342 		return skb;
2343 	/**
2344 	 * unlikely path
2345 	 */
2346 	__qdf_nbuf_count_dec(skb);
2347 	dev_kfree_skb_any(skb);
2348 	return NULL;
2349 }
2350 
2351 /**
2352  * __qdf_nbuf_linearize() - skb linearize
2353  * @skb: sk buff
2354  *
2355  * create a version of the specified nbuf whose contents
2356  * can be safely modified without affecting other
2357  * users.If the nbuf is non-linear then this function
2358  * linearize. if unable to linearize returns -ENOMEM on
2359  * success 0 is returned
2360  *
2361  * Return: 0 on Success, -ENOMEM on failure is returned.
2362  */
2363 static inline int
2364 __qdf_nbuf_linearize(struct sk_buff *skb)
2365 {
2366 	return skb_linearize(skb);
2367 }
2368 
2369 /**
2370  * __qdf_nbuf_unshare() - skb unshare
2371  * @skb: sk buff
2372  *
2373  * create a version of the specified nbuf whose contents
2374  * can be safely modified without affecting other
2375  * users.If the nbuf is a clone then this function
2376  * creates a new copy of the data. If the buffer is not
2377  * a clone the original buffer is returned.
2378  *
2379  * Return: skb or NULL
2380  */
2381 static inline struct sk_buff *
2382 __qdf_nbuf_unshare(struct sk_buff *skb)
2383 {
2384 	struct sk_buff *skb_new;
2385 
2386 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
2387 
2388 	skb_new = skb_unshare(skb, GFP_ATOMIC);
2389 	if (skb_new)
2390 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
2391 
2392 	return skb_new;
2393 }
2394 
2395 /**
2396  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
2397  * @skb: sk buff
2398  *
2399  * Return: true/false
2400  */
2401 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
2402 {
2403 	return skb_cloned(skb);
2404 }
2405 
2406 /**
2407  * __qdf_nbuf_pool_init() - init pool
2408  * @net: net handle
2409  *
2410  * Return: QDF status
2411  */
2412 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
2413 {
2414 	return QDF_STATUS_SUCCESS;
2415 }
2416 
2417 /*
2418  * adf_nbuf_pool_delete() implementation - do nothing in linux
2419  */
2420 #define __qdf_nbuf_pool_delete(osdev)
2421 
2422 /**
2423  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2424  *        release the skb.
2425  * @skb: sk buff
2426  * @headroom: size of headroom
2427  * @tailroom: size of tailroom
2428  *
2429  * Return: skb or NULL
2430  */
2431 static inline struct sk_buff *
2432 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2433 {
2434 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2435 		return skb;
2436 
2437 	__qdf_nbuf_count_dec(skb);
2438 	dev_kfree_skb_any(skb);
2439 	return NULL;
2440 }
2441 
2442 /**
2443  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2444  * @buf: Network buf instance
2445  * @headroom: Additional headroom to be added
2446  * @tailroom: Additional tailroom to be added
2447  *
2448  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2449  *	or NULL if there is no memory
2450  */
2451 static inline struct sk_buff *
2452 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2453 {
2454 	struct sk_buff *copy;
2455 	copy = skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2456 	if (copy)
2457 		__qdf_nbuf_count_inc(copy);
2458 
2459 	return copy;
2460 }
2461 
2462 /**
2463  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2464  * @buf: Network buf instance
2465  *
2466  * Return: True, if buf has frag_list else return False
2467  */
2468 static inline bool
2469 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2470 {
2471 	return skb_has_frag_list(buf);
2472 }
2473 
2474 /**
2475  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2476  * @buf: Network buf instance
2477  *
2478  * Return: Network buf instance
2479  */
2480 static inline struct sk_buff *
2481 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2482 {
2483 	struct sk_buff *list;
2484 
2485 	if (!__qdf_nbuf_has_fraglist(buf))
2486 		return NULL;
2487 
2488 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2489 		;
2490 
2491 	return list;
2492 }
2493 
2494 /**
2495  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2496  * @buf: Network buf instance
2497  *
2498  * Return: void
2499  */
2500 static inline void
2501 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2502 {
2503 	struct sk_buff *list;
2504 
2505 	skb_walk_frags(buf, list)
2506 		skb_get(list);
2507 }
2508 
2509 /**
2510  * __qdf_nbuf_tx_cksum_info() - tx checksum info
2511  * @skb: Network buffer
2512  * @hdr_off:
2513  * @where:
2514  *
2515  * Return: true/false
2516  */
2517 static inline bool
2518 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2519 			 uint8_t **where)
2520 {
2521 	qdf_assert(0);
2522 	return false;
2523 }
2524 
2525 /**
2526  * __qdf_nbuf_reset_ctxt() - mem zero control block
2527  * @nbuf: buffer
2528  *
2529  * Return: none
2530  */
2531 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2532 {
2533 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2534 }
2535 
2536 /**
2537  * __qdf_nbuf_network_header() - get network header
2538  * @buf: buffer
2539  *
2540  * Return: network header pointer
2541  */
2542 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2543 {
2544 	return skb_network_header(buf);
2545 }
2546 
2547 /**
2548  * __qdf_nbuf_transport_header() - get transport header
2549  * @buf: buffer
2550  *
2551  * Return: transport header pointer
2552  */
2553 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2554 {
2555 	return skb_transport_header(buf);
2556 }
2557 
2558 /**
2559  *  __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
2560  *  passed as part of network buffer by network stack
2561  * @skb: sk buff
2562  *
2563  * Return: TCP MSS size
2564  *
2565  */
2566 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2567 {
2568 	return skb_shinfo(skb)->gso_size;
2569 }
2570 
2571 /**
2572  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2573  * @nbuf: sk buff
2574  *
2575  * Return: none
2576  */
2577 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2578 
2579 /**
2580  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2581  * @nbuf: sk buff
2582  *
2583  * Return: void ptr
2584  */
2585 static inline void *
2586 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2587 {
2588 	return (void *)nbuf->cb;
2589 }
2590 
2591 /**
2592  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2593  * @skb: sk buff
2594  *
2595  * Return: head size
2596  */
2597 static inline size_t
2598 __qdf_nbuf_headlen(struct sk_buff *skb)
2599 {
2600 	return skb_headlen(skb);
2601 }
2602 
2603 /**
2604  * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not.
2605  * @skb: sk buff
2606  *
2607  * Return: true/false
2608  */
2609 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2610 {
2611 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2612 }
2613 
2614 /**
2615  * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not.
2616  * @skb: sk buff
2617  *
2618  * Return: true/false
2619  */
2620 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2621 {
2622 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2623 }
2624 
2625 /**
2626  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2627  * @skb: sk buff
2628  *
2629  * Return: size of l2+l3+l4 header length
2630  */
2631 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2632 {
2633 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2634 }
2635 
2636 /**
2637  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2638  * @skb: sk buff
2639  *
2640  * Return: size of TCP header length
2641  */
2642 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2643 {
2644 	return tcp_hdrlen(skb);
2645 }
2646 
2647 /**
2648  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2649  * @skb: sk buff
2650  *
2651  * Return:  true/false
2652  */
2653 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2654 {
2655 	if (skb_is_nonlinear(skb))
2656 		return true;
2657 	else
2658 		return false;
2659 }
2660 
2661 /**
2662  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the  skb
2663  * @skb: sk buff
2664  *
2665  * Return: TCP sequence number
2666  */
2667 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2668 {
2669 	return ntohl(tcp_hdr(skb)->seq);
2670 }
2671 
2672 /**
2673  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf'f private space
2674  *@skb: sk buff
2675  *
2676  * Return: data pointer to typecast into your priv structure
2677  */
2678 static inline char *
2679 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2680 {
2681 	return &skb->cb[8];
2682 }
2683 
2684 /**
2685  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2686  * @buf: Pointer to nbuf
2687  *
2688  * Return: None
2689  */
2690 static inline void
2691 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2692 {
2693 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2694 }
2695 
2696 /**
2697  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2698  *
2699  * @skb: sk buff
2700  * @queue_id: Queue id
2701  *
2702  * Return: void
2703  */
2704 static inline void
2705 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2706 {
2707 	skb_record_rx_queue(skb, queue_id);
2708 }
2709 
2710 /**
2711  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2712  *
2713  * @skb: sk buff
2714  *
2715  * Return: Queue mapping
2716  */
2717 static inline uint16_t
2718 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2719 {
2720 	return skb->queue_mapping;
2721 }
2722 
2723 /**
2724  * __qdf_nbuf_set_queue_mapping() - get the queue mapping set by linux kernel
2725  *
2726  * @skb: sk buff
2727  * @val: queue_id
2728  *
2729  */
2730 static inline void
2731 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2732 {
2733 	skb_set_queue_mapping(skb, val);
2734 }
2735 
2736 /**
2737  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2738  *
2739  * @skb: sk buff
2740  *
2741  * Return: void
2742  */
2743 static inline void
2744 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2745 {
2746 	__net_timestamp(skb);
2747 }
2748 
2749 /**
2750  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2751  *
2752  * @skb: sk buff
2753  *
2754  * Return: timestamp stored in skb in ms
2755  */
2756 static inline uint64_t
2757 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2758 {
2759 	return ktime_to_ms(skb_get_ktime(skb));
2760 }
2761 
2762 /**
2763  * __qdf_nbuf_get_timestamp_us() - get the timestamp for frame
2764  *
2765  * @skb: sk buff
2766  *
2767  * Return: timestamp stored in skb in us
2768  */
2769 static inline uint64_t
2770 __qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
2771 {
2772 	return ktime_to_us(skb_get_ktime(skb));
2773 }
2774 
2775 /**
2776  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2777  *
2778  * @skb: sk buff
2779  *
2780  * Return: time difference in ms
2781  */
2782 static inline uint64_t
2783 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2784 {
2785 	return ktime_to_ms(net_timedelta(skb->tstamp));
2786 }
2787 
2788 /**
2789  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2790  *
2791  * @skb: sk buff
2792  *
2793  * Return: time difference in micro seconds
2794  */
2795 static inline uint64_t
2796 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2797 {
2798 	return ktime_to_us(net_timedelta(skb->tstamp));
2799 }
2800 
2801 /**
2802  * __qdf_nbuf_orphan() - orphan a nbuf
2803  * @skb: sk buff
2804  *
2805  * If a buffer currently has an owner then we call the
2806  * owner's destructor function
2807  *
2808  * Return: void
2809  */
2810 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2811 {
2812 	return skb_orphan(skb);
2813 }
2814 
2815 /**
2816  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2817  * head pointer to end pointer
2818  * @nbuf: qdf_nbuf_t
2819  *
2820  * Return: size of network buffer from head pointer to end
2821  * pointer
2822  */
2823 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2824 {
2825 	return skb_end_offset(nbuf);
2826 }
2827 
2828 /**
2829  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2830  * including the header and variable data area
2831  * @skb: sk buff
2832  *
2833  * Return: size of network buffer
2834  */
2835 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2836 {
2837 	return skb->truesize;
2838 }
2839 
2840 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2841 /**
2842  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2843  * from the total skb mem and DP tx/rx skb mem
2844  * @nbytes: number of bytes
2845  * @dir: direction
2846  * @is_mapped: is mapped or unmapped memory
2847  *
2848  * Return: none
2849  */
2850 static inline void __qdf_record_nbuf_nbytes(
2851 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2852 {
2853 	if (is_mapped) {
2854 		if (dir == QDF_DMA_TO_DEVICE) {
2855 			qdf_mem_dp_tx_skb_cnt_inc();
2856 			qdf_mem_dp_tx_skb_inc(nbytes);
2857 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2858 			qdf_mem_dp_rx_skb_cnt_inc();
2859 			qdf_mem_dp_rx_skb_inc(nbytes);
2860 		}
2861 		qdf_mem_skb_total_inc(nbytes);
2862 	} else {
2863 		if (dir == QDF_DMA_TO_DEVICE) {
2864 			qdf_mem_dp_tx_skb_cnt_dec();
2865 			qdf_mem_dp_tx_skb_dec(nbytes);
2866 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2867 			qdf_mem_dp_rx_skb_cnt_dec();
2868 			qdf_mem_dp_rx_skb_dec(nbytes);
2869 		}
2870 		qdf_mem_skb_total_dec(nbytes);
2871 	}
2872 }
2873 
2874 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2875 static inline void __qdf_record_nbuf_nbytes(
2876 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2877 {
2878 }
2879 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2880 
/* Dequeue an skb from the head of a kernel sk_buff_head (locked variant). */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	struct sk_buff *skb = skb_dequeue(skb_queue_head);

	return skb;
}
2886 
2887 static inline
2888 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2889 {
2890 	return skb_queue_head->qlen;
2891 }
2892 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - append an skb to a sk_buff_head
 * @skb_queue_head: Pointer to skb list head
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	/* Fix: skb_queue_tail() returns void; "return <void expr>" is an
	 * ISO C constraint violation (6.8.6.4).
	 */
	skb_queue_tail(skb_queue_head, skb);
}
2899 
/**
 * __qdf_nbuf_queue_head_init() - initialize a kernel sk_buff_head
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	/* Fix: skb_queue_head_init() returns void; do not "return" it */
	skb_queue_head_init(skb_queue_head);
}
2905 
/**
 * __qdf_nbuf_queue_head_purge() - free every skb held by a sk_buff_head
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	/* Fix: skb_queue_purge() returns void; do not "return" it */
	skb_queue_purge(skb_queue_head);
}
2911 
2912 static inline
2913 int __qdf_nbuf_queue_empty(__qdf_nbuf_queue_head_t *nbuf_queue_head)
2914 {
2915 	return skb_queue_empty(nbuf_queue_head);
2916 }
2917 
2918 /**
2919  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
2920  * @skb_queue_head: skb list for which lock is to be acquired
2921  *
2922  * Return: void
2923  */
2924 static inline
2925 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2926 {
2927 	spin_lock_bh(&skb_queue_head->lock);
2928 }
2929 
2930 /**
2931  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
2932  * @skb_queue_head: skb list for which lock is to be release
2933  *
2934  * Return: void
2935  */
2936 static inline
2937 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2938 {
2939 	spin_unlock_bh(&skb_queue_head->lock);
2940 }
2941 
2942 /**
2943  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2944  * @nbuf: qdf_nbuf_t
2945  * @idx: Index for which frag size is requested
2946  *
2947  * Return: Frag size
2948  */
2949 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2950 							   uint8_t idx)
2951 {
2952 	unsigned int size = 0;
2953 
2954 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2955 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2956 	return size;
2957 }
2958 
2959 /**
2960  * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
2961  * @nbuf: qdf_nbuf_t
2962  * @idx: Index for which frag address is requested
2963  *
2964  * Return: Frag address in success, else NULL
2965  */
2966 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2967 						    uint8_t idx)
2968 {
2969 	__qdf_frag_t frag_addr = NULL;
2970 
2971 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2972 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2973 	return frag_addr;
2974 }
2975 
2976 /**
2977  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2978  * @nbuf: qdf_nbuf_t
2979  * @idx: Frag index
2980  * @size: Size by which frag_size needs to be increased/decreased
2981  *        +Ve means increase, -Ve means decrease
2982  * @truesize: truesize
2983  */
2984 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2985 						 int size,
2986 						 unsigned int truesize)
2987 {
2988 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2989 }
2990 
2991 /**
2992  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2993  *          and adjust length by size.
2994  * @nbuf: qdf_nbuf_t
2995  * @idx: Frag index
2996  * @offset: Frag page offset should be moved by offset.
2997  *      +Ve - Move offset forward.
2998  *      -Ve - Move offset backward.
2999  *
3000  * Return: QDF_STATUS
3001  */
3002 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
3003 					    int offset);
3004 
3005 /**
3006  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
3007  * @nbuf: nbuf pointer
3008  * @idx: frag idx need to be removed
3009  * @truesize: truesize of frag
3010  *
3011  * Return : void
3012  */
3013 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);
3014 /**
3015  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
3016  * @buf: Frag pointer needs to be added in nbuf frag
3017  * @nbuf: qdf_nbuf_t where frag will be added
3018  * @offset: Offset in frag to be added to nbuf_frags
3019  * @frag_len: Frag length
3020  * @truesize: truesize
3021  * @take_frag_ref: Whether to take ref for frag or not
3022  *      This bool must be set as per below comdition:
3023  *      1. False: If this frag is being added in any nbuf
3024  *              for the first time after allocation.
3025  *      2. True: If frag is already attached part of any
3026  *              nbuf.
3027  *
3028  * It takes ref_count based on boolean flag take_frag_ref
3029  */
3030 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
3031 			    int offset, int frag_len,
3032 			    unsigned int truesize, bool take_frag_ref);
3033 
3034 /**
3035  * __qdf_nbuf_ref_frag() - get frag reference
3036  * @buf: Pointer to nbuf
3037  *
3038  * Return: void
3039  */
3040 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
3041 
3042 /**
3043  * __qdf_nbuf_set_mark() - Set nbuf mark
3044  * @buf: Pointer to nbuf
3045  * @mark: Value to set mark
3046  *
3047  * Return: None
3048  */
3049 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
3050 {
3051 	buf->mark = mark;
3052 }
3053 
3054 /**
3055  * __qdf_nbuf_get_mark() - Get nbuf mark
3056  * @buf: Pointer to nbuf
3057  *
3058  * Return: Value of mark
3059  */
3060 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
3061 {
3062 	return buf->mark;
3063 }
3064 
3065 /**
3066  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
3067  * the data pointer to the end pointer
3068  * @nbuf: qdf_nbuf_t
3069  *
3070  * Return: size of skb from data pointer to end pointer
3071  */
3072 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
3073 {
3074 	return (skb_end_pointer(nbuf) - nbuf->data);
3075 }
3076 
3077 /**
3078  * __qdf_nbuf_set_data_len() - Return the data_len of the nbuf
3079  * @nbuf: qdf_nbuf_t
3080  * @len: data_len to be set
3081  *
3082  * Return: value of data_len
3083  */
3084 static inline
3085 qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
3086 {
3087 	return nbuf->data_len = len;
3088 }
3089 
3090 /**
3091  * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
3092  * @nbuf: qdf_nbuf_t
3093  *
3094  * Return: value of data_len
3095  */
3096 static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
3097 {
3098 	return nbuf->data_len;
3099 }
3100 
3101 /**
3102  * __qdf_nbuf_set_hash() - set the hash of the buf
3103  * @buf: Network buf instance
3104  * @len: len to be set
3105  *
3106  * Return: None
3107  */
3108 static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
3109 {
3110 	buf->hash = len;
3111 }
3112 
3113 /**
3114  * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
3115  * @buf: Network buf instance
3116  * @len: len to be set
3117  *
3118  * Return: None
3119  */
3120 static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
3121 {
3122 	buf->sw_hash = len;
3123 }
3124 
3125 /**
3126  * __qdf_nbuf_set_csum_start() - set the csum start of the buf
3127  * @buf: Network buf instance
3128  * @len: len to be set
3129  *
3130  * Return: None
3131  */
3132 static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
3133 {
3134 	buf->csum_start = len;
3135 }
3136 
3137 /**
3138  * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
3139  * @buf: Network buf instance
3140  * @len: len to be set
3141  *
3142  * Return: None
3143  */
3144 static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
3145 {
3146 	buf->csum_offset = len;
3147 }
3148 
3149 /**
3150  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
3151  * @skb: Pointer to network buffer
3152  *
3153  * Return: Return the number of gso segments
3154  */
3155 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
3156 {
3157 	return skb_shinfo(skb)->gso_segs;
3158 }
3159 
3160 /**
3161  * __qdf_nbuf_set_gso_segs() - set the number of gso segments
3162  * @skb: Pointer to network buffer
3163  * @val: val to be set
3164  *
3165  * Return: None
3166  */
3167 static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
3168 {
3169 	skb_shinfo(skb)->gso_segs = val;
3170 }
3171 
3172 /**
3173  * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
3174  * @skb: Pointer to network buffer
3175  *
3176  * Return: None
3177  */
3178 static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
3179 {
3180 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3181 }
3182 
3183 /**
3184  * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
3185  * @skb: Pointer to network buffer
3186  *
3187  * Return: None
3188  */
3189 static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
3190 {
3191 	skb->ip_summed = CHECKSUM_PARTIAL;
3192 }
3193 
3194 /**
3195  * __qdf_nbuf_get_gso_size() - Return the number of gso size
3196  * @skb: Pointer to network buffer
3197  *
3198  * Return: Return the number of gso segments
3199  */
3200 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
3201 {
3202 	return skb_shinfo(skb)->gso_size;
3203 }
3204 
3205 /**
3206  * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
3207  * @skb: Pointer to network buffer
3208  * @val: the number of GSO segments
3209  *
3210  * Return: None
3211  */
3212 static inline void
3213 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
3214 {
3215 	skb_shinfo(skb)->gso_size = val;
3216 }
3217 
3218 /**
3219  * __qdf_nbuf_kfree() - Free nbuf using kfree
3220  * @skb: Pointer to network buffer
3221  *
3222  * This function is called to free the skb on failure cases
3223  *
3224  * Return: None
3225  */
3226 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
3227 {
3228 	kfree_skb(skb);
3229 }
3230 
3231 /**
3232  * __qdf_nbuf_dev_kfree_list() - Free nbuf list using dev based os call
3233  * @nbuf_queue_head: Pointer to nbuf queue head
3234  *
3235  * This function is called to free the nbuf list on failure cases
3236  *
3237  * Return: None
3238  */
3239 void
3240 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head);
3241 
3242 /**
3243  * __qdf_nbuf_dev_queue_head() - queue a buffer using dev at the list head
3244  * @nbuf_queue_head: Pointer to skb list head
3245  * @buff: Pointer to nbuf
3246  *
3247  * This function is called to queue buffer at the skb list head
3248  *
3249  * Return: None
3250  */
3251 static inline void
3252 __qdf_nbuf_dev_queue_head(__qdf_nbuf_queue_head_t *nbuf_queue_head,
3253 			  __qdf_nbuf_t buff)
3254 {
3255 	 __skb_queue_head(nbuf_queue_head, buff);
3256 }
3257 
3258 /**
3259  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
3260  * @skb: Pointer to network buffer
3261  *
3262  * This function is called to free the skb on failure cases
3263  *
3264  * Return: None
3265  */
3266 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
3267 {
3268 	dev_kfree_skb(skb);
3269 }
3270 
3271 /**
3272  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
3273  * @skb: Network buffer
3274  *
3275  * Return: TRUE if skb pkt type is mcast
3276  *         FALSE if not
3277  */
3278 static inline
3279 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
3280 {
3281 	return skb->pkt_type == PACKET_MULTICAST;
3282 }
3283 
3284 /**
3285  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
3286  * @skb: Network buffer
3287  *
3288  * Return: TRUE if skb pkt type is mcast
3289  *         FALSE if not
3290  */
3291 static inline
3292 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
3293 {
3294 	return skb->pkt_type == PACKET_BROADCAST;
3295 }
3296 
3297 /**
3298  * __qdf_nbuf_set_dev() - set dev of network buffer
3299  * @skb: Pointer to network buffer
3300  * @dev: value to be set in dev of network buffer
3301  *
3302  * Return: void
3303  */
3304 static inline
3305 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
3306 {
3307 	skb->dev = dev;
3308 }
3309 
3310 /**
3311  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
3312  * @skb: Pointer to network buffer
3313  *
3314  * Return: dev mtu value in nbuf
3315  */
3316 static inline
3317 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
3318 {
3319 	return skb->dev->mtu;
3320 }
3321 
3322 /**
3323  * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using eth trans
3324  *                                            os API
3325  * @skb: Pointer to network buffer
3326  *
3327  * Return: None
3328  */
3329 static inline
3330 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
3331 {
3332 	skb->protocol = eth_type_trans(skb, skb->dev);
3333 }
3334 
3335 /**
3336  * __qdf_nbuf_net_timedelta() - get time delta
3337  * @t: time as __qdf_ktime_t object
3338  *
3339  * Return: time delta as ktime_t object
3340  */
3341 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
3342 {
3343 	return net_timedelta(t);
3344 }
3345 
3346 #ifdef CONFIG_NBUF_AP_PLATFORM
3347 #include <i_qdf_nbuf_w.h>
3348 #else
3349 #include <i_qdf_nbuf_m.h>
3350 #endif
3351 #endif /*_I_QDF_NET_BUF_H */
3352