xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision 449758b4de7a219dad7b7a0e20ce2ea1c8388e34)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf.h
22  * This file provides OS dependent nbuf API's.
23  */
24 
25 #ifndef _I_QDF_NBUF_H
26 #define _I_QDF_NBUF_H
27 
28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/version.h>
33 #include <asm/cacheflush.h>
34 #include <qdf_types.h>
35 #include <qdf_net_types.h>
36 #include <qdf_status.h>
37 #include <qdf_util.h>
38 #include <qdf_mem.h>
39 #include <linux/tcp.h>
40 #include <qdf_util.h>
41 #include <qdf_nbuf_frag.h>
42 #include "qdf_time.h"
43 
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
45 /* Since commit
46  *  baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
47  *
48  * the function netif_rx() can be used in preemptible/thread context as
49  * well as in interrupt context.
50  *
51  * Use netif_rx().
52  */
53 #define netif_rx_ni(skb) netif_rx(skb)
54 #endif
55 
56 /*
57  * Use the Linux socket buffer (struct sk_buff) as the underlying
58  * implementation of the nbuf. Linux uses sk_buff to represent both the
59  * packet metadata and its data, so sk_buff serves both roles here.
60  */
61 typedef struct sk_buff *__qdf_nbuf_t;
62 
63 /**
64  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
65  *
66  * This is used for skb queue management via linux skb buff head APIs
67  */
68 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
69 
70 /**
71  * typedef __qdf_nbuf_shared_info_t - abstraction for the skb_shared_info
72  *
73  * This is used for skb shared info via linux skb shinfo APIs
74  */
75 typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;
76 
77 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
78 
79 #define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
80 
81 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
82  * max tx fragments added by the driver
83  * The driver will always add one tx fragment (the tx descriptor)
84  */
85 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
86 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
87 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
88 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
89 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
90 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
91 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
92 #define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
93 #define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8
94 #define QDF_NBUF_CB_PACKET_TYPE_TCP_ACK 9
95 
96 #define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)
97 
98 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
99 #define IEEE80211_RADIOTAP_HE 23
100 #define IEEE80211_RADIOTAP_HE_MU 24
101 #endif
102 
103 #define IEEE80211_RADIOTAP_HE_MU_OTHER 25
104 
105 #define IEEE80211_RADIOTAP_EXT1_USIG	1
106 #define IEEE80211_RADIOTAP_EXT1_EHT	2
107 
108 /* mark the first packet after wow wakeup */
109 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
110 
111 /* TCP Related MASK */
112 #define QDF_NBUF_PKT_TCPOP_FIN			0x01
113 #define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
114 #define QDF_NBUF_PKT_TCPOP_RST			0x04
115 
116 /*
117  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
118  */
119 typedef union {
120 	uint64_t       u64;
121 	qdf_dma_addr_t dma_addr;
122 } qdf_paddr_t;
123 
/* Callback to update trace information for an nbuf; receives the data
 * pointer (registered via __qdf_nbuf_reg_trace_cb()).
 */
124 typedef void (*qdf_nbuf_trace_update_t)(char *);
/* Callback invoked on nbuf free (registered via __qdf_nbuf_reg_free_cb()) */
125 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
126 
/* Get/set the mapped DMA address stored in the nbuf control block */
127 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
128 
129 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
130 	(QDF_NBUF_CB_PADDR(skb) = (paddr))
131 
/* Record a single driver-added extra fragment (e.g. the tx descriptor) in
 * the nbuf control block; overwrites any previously recorded extra fragment.
 */
132 #define __qdf_nbuf_frag_push_head(					\
133 	skb, frag_len, frag_vaddr, frag_paddr)				\
134 	do {					\
135 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
136 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = (frag_vaddr);	\
137 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = (frag_paddr);	\
138 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = (frag_len);	\
139 	} while (0)
140 
/* Virtual address of fragment @frag_num: the extra (driver) fragment when
 * frag_num is below the extra-frag count, otherwise the skb data itself.
 */
141 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
142 	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
143 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
144 
145 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
146 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
147 
/* Physical address of fragment @frag_num (extra frag or mapped skb data) */
148 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
149 	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
150 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
151 	 /* assume that the OS only provides a single fragment */	\
152 	 QDF_NBUF_CB_PADDR(skb))
153 
154 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
155 
/* Length of fragment @frag_num (extra frag length, else skb->len) */
156 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
157 	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
158 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
159 
/* Wordstream flag for fragment @frag_num (extra frag vs. nbuf data) */
160 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
161 	(((frag_num) < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
162 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
163 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
164 
/* Set the wordstream flag for fragment @frag_num.
 * NOTE: this macro may assign to its frag_num argument (clamps it to
 * QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS), so callers must pass a plain lvalue
 * and must not rely on frag_num being preserved.
 */
165 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
166 	do {								\
167 		if ((frag_num) >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
168 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
169 		if (frag_num)						\
170 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
171 							    (is_wstrm); \
172 		else					\
173 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
174 							    (is_wstrm); \
175 	} while (0)
176 
/* Accessors for per-nbuf control-block fields: TX vdev context, TX/RX frame
 * type and packet xmit type. The QDF_NBUF_CB_* field macros are defined in
 * the nbuf control-block layout elsewhere in qdf.
 */
177 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
178 	do { \
179 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
180 	} while (0)
181 
182 #define __qdf_nbuf_get_vdev_ctx(skb) \
183 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
184 
185 #define __qdf_nbuf_set_tx_ftype(skb, type) \
186 	do { \
187 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
188 	} while (0)
189 
190 #define __qdf_nbuf_set_vdev_xmit_type(skb, type) \
191 	do { \
192 		QDF_NBUF_CB_PKT_XMIT_TYPE((skb)) = (type); \
193 	} while (0)
194 
195 #define __qdf_nbuf_get_tx_ftype(skb) \
196 		 QDF_NBUF_CB_TX_FTYPE((skb))
197 
198 #define __qdf_nbuf_get_vdev_xmit_type(skb) \
199 		 QDF_NBUF_CB_PKT_XMIT_TYPE((skb))
200 
201 
202 #define __qdf_nbuf_set_rx_ftype(skb, type) \
203 	do { \
204 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
205 	} while (0)
206 
207 #define __qdf_nbuf_get_rx_ftype(skb) \
208 		 QDF_NBUF_CB_RX_FTYPE((skb))
209 
/* Set/query the per-nbuf RX and TX flag bits in the control block. All
 * setter arguments are parenthesized in the expansion per standard macro
 * hygiene, matching the (type)/(vdev_id) setters above.
 */
210 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
211 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = (val))
212 
213 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
214 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
215 
216 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
217 	do { \
218 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = (val); \
219 	} while (0)
220 
221 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
222 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
223 
224 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
225 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = (val))
226 
227 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
228 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
229 
230 #define __qdf_nbuf_set_da_mcbc(skb, val) \
231 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = (val))
232 
233 #define __qdf_nbuf_is_da_mcbc(skb) \
234 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
235 
236 #define __qdf_nbuf_set_da_valid(skb, val) \
237 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = (val))
238 
239 #define __qdf_nbuf_is_da_valid(skb) \
240 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
241 
242 #define __qdf_nbuf_set_sa_valid(skb, val) \
243 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = (val))
244 
245 #define __qdf_nbuf_is_sa_valid(skb) \
246 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
247 
248 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
249 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = (val))
250 
251 #define __qdf_nbuf_is_rx_retry_flag(skb) \
252 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
253 
254 #define __qdf_nbuf_set_raw_frame(skb, val) \
255 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = (val))
256 
257 #define __qdf_nbuf_is_raw_frame(skb) \
258 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
259 
260 #define __qdf_nbuf_is_fr_ds_set(skb) \
261 	(QDF_NBUF_CB_RX_FROM_DS((skb)))
262 
263 #define __qdf_nbuf_is_to_ds_set(skb) \
264 	(QDF_NBUF_CB_RX_TO_DS((skb)))
265 
266 #define __qdf_nbuf_get_tid_val(skb) \
267 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
268 
269 #define __qdf_nbuf_set_tid_val(skb, val) \
270 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = (val))
271 
272 #define __qdf_nbuf_set_is_frag(skb, val) \
273 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = (val))
274 
275 #define __qdf_nbuf_is_frag(skb) \
276 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
277 
278 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
279 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = (val))
280 
281 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
282 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
283 
284 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
285 	do { \
286 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = (val); \
287 	} while (0)
288 
289 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
290 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
291 
292 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
293 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = (val))
294 
295 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
296 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
297 
/* Record/read the TX protocol type in the nbuf control block (for tracing) */
298 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
299 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
300 
301 #define __qdf_nbuf_trace_get_proto_type(skb) \
302 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
303 
/* Iterate an skb queue; safe against removal of the current entry */
304 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
305 		skb_queue_walk_safe(queue, var, tvar)
306 
307 /*
308  * prototypes. Implemented in qdf_nbuf.c
309  */
310 
311 /**
312  * __qdf_nbuf_alloc() - Allocate nbuf
313  * @osdev: Device handle
314  * @size: Netbuf requested size
315  * @reserve: headroom to start with
316  * @align: Align
317  * @prio: Priority
318  * @func: Function name of the call site
319  * @line: line number of the call site
320  *
321  * This allocates an nbuf, aligns it if needed, and reserves headroom at the
322  * front. Since the reserve is applied after alignment, an unaligned reserve
323  * value will result in an unaligned data address.
324  *
325  * Return: nbuf or %NULL if no memory
326  */
327 __qdf_nbuf_t
328 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
329 		 int prio, const char *func, uint32_t line);
330 
331 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
332 				     const char *func, uint32_t line);
333 
334 #if defined(QCA_DP_NBUF_FAST_PPEDS)
335 /**
336  * __qdf_nbuf_alloc_ppe_ds() - Allocates nbuf
337  * @osdev: Device handle
338  * @size: Netbuf requested size
339  * @func: Function name of the call site
340  * @line: line number of the call site
341  *
342  * This allocates an nbuf for wifi module
343  * in DS mode and uses __netdev_alloc_skb_no_skb_reset API.
344  * The netdev API invokes skb_recycler_alloc with reset_skb
345  * as false. Hence, recycler pool will not do reset_struct
346  * when it allocates DS used buffer to DS module, which will
347  * helps to improve the performance
348  *
349  * Return: nbuf or %NULL if no memory
350  */
351 
352 __qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
353 				     const char *func, uint32_t line);
354 #endif /* QCA_DP_NBUF_FAST_PPEDS */
355 
356 /**
357  * __qdf_nbuf_frag_alloc() - Allocate nbuf in page fragment way.
358  * @osdev: Device handle
359  * @size: Netbuf requested size
360  * @reserve: headroom to start with
361  * @align: Align
362  * @prio: Priority
363  * @func: Function name of the call site
364  * @line: line number of the call site
365  *
366  * This allocates an nbuf, aligns it if needed, and reserves headroom at the
367  * front. Since the reserve is applied after alignment, an unaligned reserve
368  * value will result in an unaligned data address.
369  * It will call into kernel page fragment APIs, long time keeping for scattered
370  * allocations should be considered for avoidance.
371  * This also brings in more probability of page frag allocation failures during
372  * low memory situation. In case of page frag allocation failure, fallback to
373  * non-frag slab allocations.
374  *
375  * Return: nbuf or %NULL if no memory
376  */
377 __qdf_nbuf_t
378 __qdf_nbuf_frag_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
379 		      int prio, const char *func, uint32_t line);
380 
381 /**
382  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
383  * @size: Size to be allocated for skb
384  * @reserve: Reserve headroom size
385  * @align: Align data
386  * @func: Function name of the call site
387  * @line: Line number of the callsite
388  *
389  * This API allocates a nbuf and aligns it if needed and reserves some headroom
390  * space after the alignment where nbuf is not allocated from skb recycler pool.
391  *
392  * Return: Allocated nbuf pointer
393  */
394 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
395 					  const char *func, uint32_t line);
396 
397 /**
398  * __qdf_nbuf_page_frag_alloc() - Allocate nbuf from @pf_cache page
399  *				  fragment cache
400  * @osdev: Device handle
401  * @size: Netbuf requested size
402  * @reserve: headroom to start with
403  * @align: Align
404  * @pf_cache: Reference to page fragment cache
405  * @func: Function name of the call site
406  * @line: line number of the call site
407  *
408  * This allocates an nbuf, aligns it if needed, and reserves headroom at the
409  * front. Since the reserve is applied after alignment, an unaligned reserve
410  * value will result in an unaligned data address.
411  *
412  * It will call kernel page fragment APIs for allocation of skb->head, prefer
413  * this API for buffers that are allocated and freed only once i.e., for
414  * reusable buffers.
415  *
416  * Return: nbuf or %NULL if no memory
417  */
418 __qdf_nbuf_t
419 __qdf_nbuf_page_frag_alloc(__qdf_device_t osdev, size_t size, int reserve,
420 			   int align, __qdf_frag_cache_t *pf_cache,
421 			   const char *func, uint32_t line);
422 
423 /**
424  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
425  * @nbuf: Pointer to network buffer
426  *
427  * If GFP_ATOMIC is overkill, we could check whether the clone is
428  * requested from interrupt context and use GFP_ATOMIC only then,
429  * falling back to GFP_KERNEL in the normal case.
430  *
431  * example: use "in_irq() || irqs_disabled()"
432  *
433  * Return: cloned skb
434  */
435 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
436 
437 /**
438  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
439  * @skb: Pointer to network buffer
440  *
441  * Return: none
442  */
443 void __qdf_nbuf_free(struct sk_buff *skb);
444 
445 /**
446  * __qdf_nbuf_map() - map a buffer to local bus address space
447  * @osdev: OS device
448  * @skb: Pointer to network buffer
449  * @dir: Direction
450  *
451  * Return: QDF_STATUS
452  */
453 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
454 			struct sk_buff *skb, qdf_dma_dir_t dir);
455 
456 /**
457  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
458  * @osdev: OS device
459  * @skb: Pointer to network buffer
460  * @dir: dma direction
461  *
462  * Return: none
463  */
464 void __qdf_nbuf_unmap(__qdf_device_t osdev,
465 			struct sk_buff *skb, qdf_dma_dir_t dir);
466 
467 /**
468  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
469  * @osdev: OS device
470  * @skb: Pointer to network buffer
471  * @dir: Direction
472  *
473  * Return: QDF_STATUS
474  */
475 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
476 				 struct sk_buff *skb, qdf_dma_dir_t dir);
477 
478 /**
479  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
480  * @osdev: OS device
481  * @skb: Pointer to network buffer
482  * @dir: Direction
483  *
484  * Return: none
485  */
486 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
487 			struct sk_buff *skb, qdf_dma_dir_t dir);
488 
489 /**
490  * __qdf_nbuf_reg_trace_cb() - register trace callback
491  * @cb_func_ptr: Pointer to trace callback function
492  *
493  * Return: none
494  */
495 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
496 
497 /**
498  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
499  * @cb_func_ptr: function pointer to the nbuf free callback
500  *
501  * This function registers a callback function for nbuf free.
502  *
503  * Return: none
504  */
505 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
506 
507 /**
508  * __qdf_nbuf_dmamap_create() - create a DMA map.
509  * @osdev: qdf device handle
510  * @dmap: dma map handle
511  *
512  * This can later be used to map networking buffers. They:
513  * - need space in adf_drv's software descriptor
514  * - are typically created during adf_drv_create
515  * - need to be created before any API(qdf_nbuf_map) that uses them
516  *
517  * Return: QDF_STATUS
518  */
519 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
520 
521 /**
522  * __qdf_nbuf_dmamap_destroy() - delete a dma map
523  * @osdev: qdf device handle
524  * @dmap: dma map handle
525  *
526  * Return: none
527  */
528 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
529 
530 /**
531  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
532  * @dmap: dma map
533  * @cb: callback
534  * @arg: argument
535  *
536  * Return: none
537  */
538 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
539 
540 /**
541  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
542  * @osdev: os device
543  * @skb: skb handle
544  * @dir: dma direction
545  * @nbytes: number of bytes to be mapped
546  *
547  * Return: QDF_STATUS
548  */
549 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
550 				 qdf_dma_dir_t dir, int nbytes);
551 
552 /**
553  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
554  * @osdev: OS device
555  * @skb: skb handle
556  * @dir: direction
557  * @nbytes: number of bytes
558  *
559  * Return: none
560  */
561 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
562 			     qdf_dma_dir_t dir, int nbytes);
563 
564 /**
565  * __qdf_nbuf_sync_for_cpu() - nbuf sync
566  * @osdev: os device
567  * @skb: sk buff
568  * @dir: direction
569  *
570  * Return: none
571  */
572 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
573 	qdf_dma_dir_t dir);
574 
575 /**
576  * __qdf_nbuf_dma_map_info() - return the dma map info
577  * @bmap: dma map
578  * @sg: dma map info
579  *
580  * Return: none
581  */
582 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
583 
584 /**
585  * __qdf_nbuf_get_frag_size() - get frag size
586  * @nbuf: sk buffer
587  * @cur_frag: current frag
588  *
589  * Return: frag size
590  */
591 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
592 
593 /**
594  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
595  *			specified by the index
596  * @skb: sk buff
597  * @sg: scatter/gather list of all the frags
598  *
599  * Return: none
600  */
601 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
602 
603 /**
604  * __qdf_nbuf_frag_map() - dma map frag
605  * @osdev: os device
606  * @nbuf: sk buff
607  * @offset: offset
608  * @dir: direction
609  * @cur_frag: current fragment
610  *
611  * Return: QDF status
612  */
613 QDF_STATUS __qdf_nbuf_frag_map(
614 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
615 	int offset, qdf_dma_dir_t dir, int cur_frag);
616 
617 /**
618  * qdf_nbuf_classify_pkt() - classify packet
619  * @skb: sk buff
620  *
621  * Return: none
622  */
623 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
624 
625 /**
626  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
627  * @skb: Pointer to network buffer
628  *
629  * This api is for ipv4 packet.
630  *
631  * Return: true if packet is WAPI packet
632  *	   false otherwise.
633  */
634 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
635 
636 /**
637  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
638  * @skb: Pointer to network buffer
639  *
640  * This api is for ipv4 packet.
641  *
642  * Return: true if packet is tdls packet
643  *	   false otherwise.
644  */
645 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
646 
647 /**
648  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
649  * @data: Pointer to network data
650  *
651  * This api is for Tx packets.
652  *
653  * Return: true if packet is ipv4 packet
654  *	   false otherwise
655  */
656 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
657 
658 /**
659  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
660  * @data: Pointer to IPV6 packet data buffer
661  *
662  * This func. checks whether it is a IPV6 packet or not.
663  *
664  * Return: TRUE if it is a IPV6 packet
665  *         FALSE if not
666  */
667 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
668 
669 /**
670  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
671  * @data: Pointer to IPV4 packet data buffer
672  *
673  * This func. checks whether it is a IPV4 multicast packet or not.
674  *
675  * Return: TRUE if it is a IPV4 multicast packet
676  *         FALSE if not
677  */
678 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
679 
680 /**
681  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
682  * @data: Pointer to IPV6 packet data buffer
683  *
684  * This func. checks whether it is a IPV6 multicast packet or not.
685  *
686  * Return: TRUE if it is a IPV6 multicast packet
687  *         FALSE if not
688  */
689 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
690 
691 /**
692  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
693  * @data: Pointer to IPV4 ICMP packet data buffer
694  *
695  * This func. checks whether it is a ICMP packet or not.
696  *
697  * Return: TRUE if it is a ICMP packet
698  *         FALSE if not
699  */
700 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
701 
702 /**
703  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
704  * @data: Pointer to IPV6 ICMPV6 packet data buffer
705  *
706  * This func. checks whether it is a ICMPV6 packet or not.
707  *
708  * Return: TRUE if it is a ICMPV6 packet
709  *         FALSE if not
710  */
711 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
712 
713 /**
714  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
715  * @data: Pointer to IPV4 UDP packet data buffer
716  *
717  * This func. checks whether it is a IPV4 UDP packet or not.
718  *
719  * Return: TRUE if it is a IPV4 UDP packet
720  *         FALSE if not
721  */
722 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
723 
724 /**
725  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
726  * @data: Pointer to IPV4 TCP packet data buffer
727  *
728  * This func. checks whether it is a IPV4 TCP packet or not.
729  *
730  * Return: TRUE if it is a IPV4 TCP packet
731  *         FALSE if not
732  */
733 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
734 
735 /**
736  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
737  * @data: Pointer to IPV6 UDP packet data buffer
738  *
739  * This func. checks whether it is a IPV6 UDP packet or not.
740  *
741  * Return: TRUE if it is a IPV6 UDP packet
742  *         FALSE if not
743  */
744 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
745 
746 /**
747  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
748  * @data: Pointer to IPV6 TCP packet data buffer
749  *
750  * This func. checks whether it is a IPV6 TCP packet or not.
751  *
752  * Return: TRUE if it is a IPV6 TCP packet
753  *         FALSE if not
754  */
755 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
756 
757 /**
758  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
759  * @data: Pointer to network data buffer
760  *
761  * This api is for ipv4 packet.
762  *
763  * Return: true if packet is DHCP packet
764  *	   false otherwise
765  */
766 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
767 
768 /**
769  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
770  * @data: Pointer to network data buffer
771  *
772  * This api is for ipv6 packet.
773  *
774  * Return: true if packet is DHCP packet
775  *	   false otherwise
776  */
777 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
778 
779 /**
780  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
781  * @data: Pointer to network data buffer
782  *
783  * This api is for ipv6 packet.
784  *
785  * Return: true if packet is MDNS packet
786  *	   false otherwise
787  */
788 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
789 
790 /**
791  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
792  * @data: Pointer to network data buffer
793  *
794  * This api is for ipv4 packet.
795  *
796  * Return: true if packet is EAPOL packet
797  *	   false otherwise.
798  */
799 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
800 
801 /**
802  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is a igmp packet
803  * @data: Pointer to network data buffer
804  *
805  * This api is for ipv4 packet.
806  *
807  * Return: true if packet is igmp packet
808  *	   false otherwise.
809  */
810 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
811 
812 /**
813  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is a igmp packet
814  * @data: Pointer to network data buffer
815  *
816  * This api is for ipv6 packet.
817  *
818  * Return: true if packet is igmp packet
819  *	   false otherwise.
820  */
821 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
822 
823 /**
824  * __qdf_nbuf_is_ipv4_igmp_leave_pkt() - check if skb is a igmp leave packet
825  * @buf: Pointer to network buffer
826  *
827  * This api is for ipv4 packet.
828  *
829  * Return: true if packet is igmp packet
830  *	   false otherwise.
831  */
832 bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf);
833 
834 /**
835  * __qdf_nbuf_is_ipv6_igmp_leave_pkt() - check if skb is a igmp leave packet
836  * @buf: Pointer to network buffer
837  *
838  * This api is for ipv6 packet.
839  *
840  * Return: true if packet is igmp packet
841  *	   false otherwise.
842  */
843 bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf);
844 
845 /**
846  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
847  * @data: Pointer to network data buffer
848  *
849  * This api is for ipv4 packet.
850  *
851  * Return: true if packet is ARP packet
852  *	   false otherwise.
853  */
854 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
855 
856 /**
857  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
858  * @nbuf: sk buff
859  *
860  * Return: true if packet is broadcast
861  *	   false otherwise
862  */
863 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
864 
865 /**
866  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
867  * @nbuf: sk buff
868  *
869  * Return: true if packet is multicast replay
870  *	   false otherwise
871  */
872 bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
873 
874 /**
875  * __qdf_nbuf_is_arp_local() - check if local or non local arp
876  * @skb: pointer to sk_buff
877  *
878  * Return: true if local arp or false otherwise.
879  */
880 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
881 
882 /**
883  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
884  * @data: Pointer to network data buffer
885  *
886  * This api is for ipv4 packet.
887  *
888  * Return: true if packet is ARP request
889  *	   false otherwise.
890  */
891 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
892 
893 /**
894  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
895  * @data: Pointer to network data buffer
896  *
897  * This api is for ipv4 packet.
898  *
899  * Return: true if packet is ARP response
900  *	   false otherwise.
901  */
902 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
903 
904 /**
905  * __qdf_nbuf_get_arp_src_ip() - get arp src IP
906  * @data: Pointer to network data buffer
907  *
908  * This api is for ipv4 packet.
909  *
910  * Return: ARP packet source IP value.
911  */
912 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
913 
914 /**
915  * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
916  * @data: Pointer to network data buffer
917  *
918  * This api is for ipv4 packet.
919  *
920  * Return: ARP packet target IP value.
921  */
922 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
923 
924 /**
925  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
926  * @data: Pointer to network data buffer
927  * @len: length to copy
928  *
929  * This api is for dns domain name
930  *
931  * Return: dns domain name.
932  */
933 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
934 
935 /**
936  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
937  * @data: Pointer to network data buffer
938  *
939  * This api is for dns query packet.
940  *
941  * Return: true if packet is dns query packet.
942  *	   false otherwise.
943  */
944 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
945 
946 /**
947  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
948  * @data: Pointer to network data buffer
949  *
950  * This api is for dns query response.
951  *
952  * Return: true if packet is dns response packet.
953  *	   false otherwise.
954  */
955 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
956 
957 /**
958  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
959  * @data: Pointer to network data buffer
960  *
961  * This api is to check if the packet is tcp fin.
962  *
963  * Return: true if packet is tcp fin packet.
964  *         false otherwise.
965  */
966 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
967 
968 /**
969  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
970  * @data: Pointer to network data buffer
971  *
972  * This api is to check if the tcp packet is fin ack.
973  *
974  * Return: true if packet is tcp fin ack packet.
975  *         false otherwise.
976  */
977 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
978 
979 /**
980  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
981  * @data: Pointer to network data buffer
982  *
983  * This api is for tcp syn packet.
984  *
985  * Return: true if packet is tcp syn packet.
986  *	   false otherwise.
987  */
988 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
989 
990 /**
991  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
992  * @data: Pointer to network data buffer
993  *
994  * This api is for tcp syn ack packet.
995  *
996  * Return: true if packet is tcp syn ack packet.
997  *	   false otherwise.
998  */
999 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
1000 
1001 /**
1002  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
1003  * @data: Pointer to network data buffer
1004  *
1005  * This api is to check if the tcp packet is rst.
1006  *
1007  * Return: true if packet is tcp rst packet.
1008  *         false otherwise.
1009  */
1010 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
1011 
1012 /**
1013  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1014  * @data: Pointer to network data buffer
1015  *
1016  * This api is for tcp ack packet.
1017  *
1018  * Return: true if packet is tcp ack packet.
1019  *	   false otherwise.
1020  */
1021 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
1022 
1023 /**
1024  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1025  * @data: Pointer to network data buffer
1026  *
1027  * This api is for tcp packet.
1028  *
1029  * Return: tcp source port value.
1030  */
1031 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
1032 
1033 /**
1034  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1035  * @data: Pointer to network data buffer
1036  *
1037  * This api is for tcp packet.
1038  *
1039  * Return: tcp destination port value.
1040  */
1041 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
1042 
1043 /**
1044  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1045  * @data: Pointer to network data buffer
1046  *
 * This api is for icmpv4 request packet.
1048  *
1049  * Return: true if packet is icmpv4 request
1050  *	   false otherwise.
1051  */
1052 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
1053 
1054 /**
1055  * __qdf_nbuf_data_is_icmpv4_redirect() - check if skb data is a icmpv4 redirect
1056  * @data: Pointer to network data buffer
1057  *
 * This api is for icmpv4 redirect packet.
1059  *
1060  * Return: true if packet is icmpv4 redirect
1061  *	   false otherwise.
1062  */
1063 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
1064 
1065 /**
1066  * __qdf_nbuf_data_is_icmpv6_redirect() - check if skb data is a icmpv6 redirect
1067  * @data: Pointer to network data buffer
1068  *
 * This api is for icmpv6 redirect packet.
1070  *
1071  * Return: true if packet is icmpv6 redirect
1072  *	   false otherwise.
1073  */
1074 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
1075 
1076 /**
1077  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1078  * @data: Pointer to network data buffer
1079  *
 * This api is for icmpv4 response packet.
1081  *
1082  * Return: true if packet is icmpv4 response
1083  *	   false otherwise.
1084  */
1085 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
1086 
1087 /**
1088  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
1089  * @data: Pointer to network data buffer
1090  *
1091  * This api is for ipv4 packet.
1092  *
1093  * Return: icmpv4 packet source IP value.
1094  */
1095 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
1096 
1097 /**
1098  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
1099  * @data: Pointer to network data buffer
1100  *
1101  * This api is for ipv4 packet.
1102  *
1103  * Return: icmpv4 packet target IP value.
1104  */
1105 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
1106 
1107 /**
1108  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1109  *              of DHCP packet.
1110  * @data: Pointer to DHCP packet data buffer
1111  *
1112  * This func. returns the subtype of DHCP packet.
1113  *
1114  * Return: subtype of the DHCP packet.
1115  */
1116 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
1117 
1118 /**
1119  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype of EAPOL packet.
1120  * @data: Pointer to EAPOL packet data buffer
1121  *
1122  * This func. returns the subtype of EAPOL packet.
1123  *
1124  * Return: subtype of the EAPOL packet.
1125  */
1126 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
1127 
1128 /**
1129  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1130  *            of ARP packet.
1131  * @data: Pointer to ARP packet data buffer
1132  *
1133  * This func. returns the subtype of ARP packet.
1134  *
1135  * Return: subtype of the ARP packet.
1136  */
1137 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
1138 
1139 /**
1140  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1141  *            of IPV4 ICMP packet.
1142  * @data: Pointer to IPV4 ICMP packet data buffer
1143  *
1144  * This func. returns the subtype of ICMP packet.
1145  *
1146  * Return: subtype of the ICMP packet.
1147  */
1148 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
1149 
1150 /**
1151  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1152  *            of IPV6 ICMPV6 packet.
1153  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1154  *
1155  * This func. returns the subtype of ICMPV6 packet.
1156  *
1157  * Return: subtype of the ICMPV6 packet.
1158  */
1159 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
1160 
1161 /**
1162  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1163  *            of IPV4 packet.
1164  * @data: Pointer to IPV4 packet data buffer
1165  *
1166  * This func. returns the proto type of IPV4 packet.
1167  *
1168  * Return: proto type of IPV4 packet.
1169  */
1170 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
1171 
1172 /**
1173  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1174  *            of IPV6 packet.
1175  * @data: Pointer to IPV6 packet data buffer
1176  *
1177  * This func. returns the proto type of IPV6 packet.
1178  *
1179  * Return: proto type of IPV6 packet.
1180  */
1181 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
1182 
1183 /**
1184  * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of IPv4 packet
1185  * @data: Pointer to skb payload
1186  *
1187  * This func. returns the TOS type of IPv4 packet.
1188  *
1189  * Return: TOS type of IPv4 packet.
1190  */
1191 uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
1192 
1193 /**
1194  * __qdf_nbuf_data_get_ipv6_tc() - get the TC field
1195  *                                 of IPv6 packet.
1196  * @data: Pointer to IPv6 packet data buffer
1197  *
1198  * This func. returns the TC field of IPv6 packet.
1199  *
1200  * Return: traffic classification of IPv6 packet.
1201  */
1202 uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
1203 
1204 /**
1205  * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for IPv4 packet
1206  * @data: pointer to skb payload
1207  * @tos: value of TOS to be set
1208  *
1209  * This func. set the TOS field of IPv4 packet.
1210  *
1211  * Return: None
1212  */
1213 void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
1214 
1215 /**
1216  * __qdf_nbuf_data_set_ipv6_tc() - set the TC field
1217  *                                 of IPv6 packet.
1218  * @data: Pointer to skb payload
1219  * @tc: value to set to IPv6 header TC field
1220  *
1221  * This func. set the TC field of IPv6 header.
1222  *
1223  * Return: None
1224  */
1225 void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
1226 
1227 /**
1228  * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
1229  * @skb: Buffer
1230  *
1231  * This function checks IPv4 packet is last fragment or not.
1232  * Caller has to call this function for IPv4 packets only.
1233  *
1234  * Return: True if IPv4 packet is last fragment otherwise false
1235  */
1236 bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);
1237 
/**
 * __qdf_nbuf_is_ipv4_v6_pure_tcp_ack() - check if skb is a pure TCP ack
 * @skb: Buffer
 *
 * Return: true if the IPv4/IPv6 packet is a pure (no payload) TCP ack,
 *	   false otherwise.
 */
bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb);
1239 
1240 #ifdef QDF_NBUF_GLOBAL_COUNT
1241 /**
1242  * __qdf_nbuf_count_get() - get nbuf global count
1243  *
1244  * Return: nbuf global count
1245  */
1246 int __qdf_nbuf_count_get(void);
1247 
1248 /**
1249  * __qdf_nbuf_count_inc() - increment nbuf global count
1250  *
1251  * @nbuf: sk buff
1252  *
1253  * Return: void
1254  */
1255 void __qdf_nbuf_count_inc(struct sk_buff *nbuf);
1256 
1257 /**
1258  * __qdf_nbuf_count_dec() - decrement nbuf global count
1259  *
1260  * @nbuf: sk buff
1261  *
1262  * Return: void
1263  */
1264 void __qdf_nbuf_count_dec(struct sk_buff *nbuf);
1265 
1266 /**
1267  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
1268  *
 * Return: void
1270  */
1271 void __qdf_nbuf_mod_init(void);
1272 
1273 /**
 * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
 *
 * Return: void
1277  */
1278 void __qdf_nbuf_mod_exit(void);
1279 
1280 #else
1281 
1282 static inline int __qdf_nbuf_count_get(void)
1283 {
1284 	return 0;
1285 }
1286 
1287 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
1288 {
1289 	return;
1290 }
1291 
1292 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
1293 {
1294 	return;
1295 }
1296 
1297 static inline void __qdf_nbuf_mod_init(void)
1298 {
1299 	return;
1300 }
1301 
1302 static inline void __qdf_nbuf_mod_exit(void)
1303 {
1304 	return;
1305 }
1306 #endif
1307 
1308 /**
1309  * __qdf_to_status() - OS to QDF status conversion
1310  * @error : OS error
1311  *
1312  * Return: QDF status
1313  */
1314 static inline QDF_STATUS __qdf_to_status(signed int error)
1315 {
1316 	switch (error) {
1317 	case 0:
1318 		return QDF_STATUS_SUCCESS;
1319 	case ENOMEM:
1320 	case -ENOMEM:
1321 		return QDF_STATUS_E_NOMEM;
1322 	default:
1323 		return QDF_STATUS_E_NOSUPPORT;
1324 	}
1325 }
1326 
1327 /**
1328  * __qdf_nbuf_cat() - link two nbufs
1329  * @dst: Buffer to piggyback into
1330  * @src: Buffer to put
1331  *
1332  * Concat two nbufs, the new buf(src) is piggybacked into the older one.
1333  * It is callers responsibility to free the src skb.
1334  *
1335  * Return: QDF_STATUS (status of the call) if failed the src skb
1336  *         is released
1337  */
1338 static inline QDF_STATUS
1339 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
1340 {
1341 	QDF_STATUS error = 0;
1342 
1343 	qdf_assert(dst && src);
1344 
1345 	/*
1346 	 * Since pskb_expand_head unconditionally reallocates the skb->head
1347 	 * buffer, first check whether the current buffer is already large
1348 	 * enough.
1349 	 */
1350 	if (skb_tailroom(dst) < src->len) {
1351 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
1352 		if (error)
1353 			return __qdf_to_status(error);
1354 	}
1355 
1356 	memcpy(skb_tail_pointer(dst), src->data, src->len);
1357 	skb_put(dst, src->len);
1358 	return __qdf_to_status(error);
1359 }
1360 
1361 /*
1362  * nbuf manipulation routines
1363  */
1364 /**
1365  * __qdf_nbuf_headroom() - return the amount of tail space available
1366  * @skb: Pointer to network buffer
1367  *
1368  * Return: amount of tail room
1369  */
1370 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
1371 {
1372 	return skb_headroom(skb);
1373 }
1374 
1375 /**
1376  * __qdf_nbuf_tailroom() - return the amount of tail space available
1377  * @skb: Pointer to network buffer
1378  *
1379  * Return: amount of tail room
1380  */
1381 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
1382 {
1383 	return skb_tailroom(skb);
1384 }
1385 
1386 /**
1387  * __qdf_nbuf_put_tail() - Puts data in the end
1388  * @skb: Pointer to network buffer
1389  * @size: size to be pushed
1390  *
1391  * Return: data pointer of this buf where new data has to be
1392  *         put, or NULL if there is not enough room in this buf.
1393  */
1394 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
1395 {
1396 	if (skb_tailroom(skb) < size) {
1397 		if (unlikely(pskb_expand_head(skb, 0,
1398 			size - skb_tailroom(skb), GFP_ATOMIC))) {
1399 			__qdf_nbuf_count_dec(skb);
1400 			dev_kfree_skb_any(skb);
1401 			return NULL;
1402 		}
1403 	}
1404 	return skb_put(skb, size);
1405 }
1406 
1407 /**
1408  * __qdf_nbuf_trim_tail() - trim data out from the end
1409  * @skb: Pointer to network buffer
1410  * @size: size to be popped
1411  *
1412  * Return: none
1413  */
1414 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
1415 {
1416 	return skb_trim(skb, skb->len - size);
1417 }
1418 
1419 
1420 /*
1421  * prototypes. Implemented in qdf_nbuf.c
1422  */
1423 
1424 /**
1425  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1426  * @skb: Pointer to network buffer
1427  *
1428  * Return: TX checksum value
1429  */
1430 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
1431 
1432 /**
1433  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1434  * @skb: Pointer to network buffer
1435  * @cksum: Pointer to checksum value
1436  *
1437  * Return: QDF_STATUS
1438  */
1439 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
1440 				   qdf_nbuf_rx_cksum_t *cksum);
1441 
1442 /**
1443  * __qdf_nbuf_get_tid() - get tid
1444  * @skb: Pointer to network buffer
1445  *
1446  * Return: tid
1447  */
1448 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
1449 
1450 /**
1451  * __qdf_nbuf_set_tid() - set tid
1452  * @skb: Pointer to network buffer
1453  * @tid: TID value to set
1454  *
1455  * Return: none
1456  */
1457 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
1458 
1459 /**
1460  * __qdf_nbuf_get_exemption_type() - get exemption type
1461  * @skb: Pointer to network buffer
1462  *
1463  * Return: exemption type
1464  */
1465 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
1466 
1467 /**
1468  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
1469  * @skb: sk_buff handle
1470  *
1471  * Return: none
1472  */
1473 
1474 void __qdf_nbuf_ref(struct sk_buff *skb);
1475 
1476 /**
1477  * __qdf_nbuf_shared() - Check whether the buffer is shared
1478  *  @skb: sk_buff buffer
1479  *
1480  *  Return: true if more than one person has a reference to this buffer.
1481  */
1482 int __qdf_nbuf_shared(struct sk_buff *skb);
1483 
1484 /**
1485  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb,
1486  * @skb: sk buff
1487  *
1488  * Return: number of fragments
1489  */
1490 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1491 {
1492 	return skb_shinfo(skb)->nr_frags;
1493 }
1494 
1495 /**
1496  * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
1497  * @skb: sk buff
1498  *
1499  * This API returns a total number of fragments from the fraglist
1500  * Return: total number of fragments
1501  */
1502 static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
1503 {
1504 	uint32_t num_frag = 0;
1505 	struct sk_buff *list = NULL;
1506 
1507 	num_frag = skb_shinfo(skb)->nr_frags;
1508 	skb_walk_frags(skb, list)
1509 		num_frag += skb_shinfo(list)->nr_frags;
1510 
1511 	return num_frag;
1512 }
1513 
1514 /*
1515  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1516  */
1517 #define __qdf_nbuf_pool_delete(osdev)
1518 
1519 /**
1520  * __qdf_nbuf_copy() - returns a private copy of the skb
1521  * @skb: Pointer to network buffer
1522  *
1523  * This API returns a private copy of the skb, the skb returned is completely
1524  *  modifiable by callers
1525  *
1526  * Return: skb or NULL
1527  */
1528 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1529 {
1530 	struct sk_buff *skb_new = NULL;
1531 
1532 	skb_new = skb_copy(skb, GFP_ATOMIC);
1533 	if (skb_new) {
1534 		__qdf_nbuf_count_inc(skb_new);
1535 	}
1536 	return skb_new;
1537 }
1538 
1539 #define __qdf_nbuf_reserve      skb_reserve
1540 
1541 /**
1542  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1543  * @skb: Pointer to network buffer
1544  * @data: data pointer
1545  *
1546  * Return: none
1547  */
1548 static inline void
1549 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1550 {
1551 	skb->data = data;
1552 }
1553 
1554 /**
1555  * __qdf_nbuf_set_len() - set buffer data length
1556  * @skb: Pointer to network buffer
1557  * @len: data length
1558  *
1559  * Return: none
1560  */
1561 static inline void
1562 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1563 {
1564 	skb->len = len;
1565 }
1566 
1567 /**
1568  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1569  * @skb: Pointer to network buffer
1570  * @len: skb data length
1571  *
1572  * Return: none
1573  */
1574 static inline void
1575 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1576 {
1577 	skb_set_tail_pointer(skb, len);
1578 }
1579 
1580 /**
1581  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1582  * @skb: Pointer to network buffer
1583  * @list: list to use
1584  *
1585  * This is a lockless version, driver must acquire locks if it
1586  * needs to synchronize
1587  *
1588  * Return: none
1589  */
1590 static inline void
1591 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1592 {
1593 	__skb_unlink(skb, list);
1594 }
1595 
1596 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1597 /**
1598  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
1599  *                                         buffer in kernel
1600  *
1601  * Return: true if dev_scratch is supported
1602  *         false if dev_scratch is not supported
1603  */
1604 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1605 {
1606 	return true;
1607 }
1608 
1609 /**
1610  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1611  * @skb: Pointer to network buffer
1612  *
1613  * Return: dev_scratch if dev_scratch supported
1614  *         0 if dev_scratch not supported
1615  */
1616 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1617 {
1618 	return skb->dev_scratch;
1619 }
1620 
1621 /**
1622  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1623  * @skb: Pointer to network buffer
1624  * @value: value to be set in dev_scratch of network buffer
1625  *
1626  * Return: void
1627  */
1628 static inline void
1629 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1630 {
1631 	skb->dev_scratch = value;
1632 }
1633 #else
/* dev_scratch is absent on kernels < 4.14: report unsupported */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}
1638 
/* dev_scratch unsupported on this kernel: always read back 0 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}
1643 
/* dev_scratch unsupported on this kernel: setting is a no-op */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
1648 #endif /* KERNEL_VERSION(4, 14, 0) */
1649 
1650 /**
1651  * __qdf_nbuf_head() - return the pointer the skb's head pointer
1652  * @skb: Pointer to network buffer
1653  *
1654  * Return: Pointer to head buffer
1655  */
1656 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1657 {
1658 	return skb->head;
1659 }
1660 
1661 /**
1662  * __qdf_nbuf_data() - return the pointer to data header in the skb
1663  * @skb: Pointer to network buffer
1664  *
1665  * Return: Pointer to skb data
1666  */
1667 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1668 {
1669 	return skb->data;
1670 }
1671 
/**
 * __qdf_nbuf_data_addr() - return the address of the skb->data field
 * @skb: Pointer to network buffer
 *
 * Note: this returns a pointer to the skb->data member itself (not to
 * the packet data), cast to uint8_t *.
 *
 * Return: address of the skb->data pointer field
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}
1676 
1677 /**
1678  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1679  * @skb: Pointer to network buffer
1680  *
1681  * Return: skb protocol
1682  */
1683 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1684 {
1685 	return skb->protocol;
1686 }
1687 
1688 /**
1689  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1690  * @skb: Pointer to network buffer
1691  *
1692  * Return: skb ip_summed
1693  */
1694 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1695 {
1696 	return skb->ip_summed;
1697 }
1698 
1699 /**
1700  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1701  * @skb: Pointer to network buffer
1702  * @ip_summed: ip checksum
1703  *
1704  * Return: none
1705  */
1706 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1707 		 uint8_t ip_summed)
1708 {
1709 	skb->ip_summed = ip_summed;
1710 }
1711 
1712 /**
1713  * __qdf_nbuf_get_priority() - return the priority value of the skb
1714  * @skb: Pointer to network buffer
1715  *
1716  * Return: skb priority
1717  */
1718 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1719 {
1720 	return skb->priority;
1721 }
1722 
1723 /**
1724  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1725  * @skb: Pointer to network buffer
1726  * @p: priority
1727  *
1728  * Return: none
1729  */
1730 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1731 {
1732 	skb->priority = p;
1733 }
1734 
1735 /**
1736  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1737  * @skb: Current skb
1738  * @skb_next: Next skb
1739  *
1740  * Return: void
1741  */
1742 static inline void
1743 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1744 {
1745 	skb->next = skb_next;
1746 }
1747 
1748 /**
1749  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1750  * @skb: Current skb
1751  *
1752  * Return: the next skb pointed to by the current skb
1753  */
1754 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1755 {
1756 	return skb->next;
1757 }
1758 
1759 /**
1760  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1761  * @skb: Current skb
1762  * @skb_next: Next skb
1763  *
1764  * This fn is used to link up extensions to the head skb. Does not handle
1765  * linking to the head
1766  *
1767  * Return: none
1768  */
1769 static inline void
1770 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1771 {
1772 	skb->next = skb_next;
1773 }
1774 
1775 /**
1776  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1777  * @skb: Current skb
1778  *
1779  * Return: the next skb pointed to by the current skb
1780  */
1781 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1782 {
1783 	return skb->next;
1784 }
1785 
1786 /**
1787  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1788  * @skb_head: head_buf nbuf holding head segment (single)
1789  * @ext_list: nbuf list holding linked extensions to the head
1790  * @ext_len: Total length of all buffers in the extension list
1791  *
1792  * This function is used to link up a list of packet extensions (seg1, 2,*  ...)
1793  * to the nbuf holding the head segment (seg0)
1794  *
1795  * Return: none
1796  */
1797 static inline void
1798 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1799 			struct sk_buff *ext_list, size_t ext_len)
1800 {
1801 	skb_shinfo(skb_head)->frag_list = ext_list;
1802 	skb_head->data_len += ext_len;
1803 	skb_head->len += ext_len;
1804 }
1805 
1806 /**
1807  * __qdf_nbuf_get_shinfo() - return the shared info of the skb
1808  * @head_buf: Pointer to network buffer
1809  *
1810  * Return: skb shared info from head buf
1811  */
1812 static inline
1813 struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
1814 {
1815 	return skb_shinfo(head_buf);
1816 }
1817 
1818 /**
1819  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1820  * @head_buf: Network buf holding head segment (single)
1821  *
1822  * This ext_list is populated when we have Jumbo packet, for example in case of
1823  * monitor mode amsdu packet reception, and are stiched using frags_list.
1824  *
1825  * Return: Network buf list holding linked extensions from head buf.
1826  */
1827 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1828 {
1829 	return (skb_shinfo(head_buf)->frag_list);
1830 }
1831 
1832 /**
1833  * __qdf_nbuf_get_age() - return the checksum value of the skb
1834  * @skb: Pointer to network buffer
1835  *
1836  * Return: checksum value
1837  */
1838 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1839 {
1840 	return skb->csum;
1841 }
1842 
1843 /**
1844  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1845  * @skb: Pointer to network buffer
1846  * @v: Value
1847  *
1848  * Return: none
1849  */
1850 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1851 {
1852 	skb->csum = v;
1853 }
1854 
1855 /**
1856  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1857  * @skb: Pointer to network buffer
1858  * @adj: Adjustment value
1859  *
1860  * Return: none
1861  */
1862 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1863 {
1864 	skb->csum -= adj;
1865 }
1866 
1867 /**
1868  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1869  * @skb: Pointer to network buffer
1870  * @offset: Offset value
1871  * @len: Length
1872  * @to: Destination pointer
1873  *
1874  * Return: length of the copy bits for skb
1875  */
1876 static inline int32_t
1877 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1878 {
1879 	return skb_copy_bits(skb, offset, to, len);
1880 }
1881 
1882 /**
1883  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1884  * @skb: Pointer to network buffer
1885  * @len:  Packet length
1886  *
1887  * Return: none
1888  */
1889 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1890 {
1891 	if (skb->len > len) {
1892 		skb_trim(skb, len);
1893 	} else {
1894 		if (skb_tailroom(skb) < len - skb->len) {
1895 			if (unlikely(pskb_expand_head(skb, 0,
1896 				len - skb->len - skb_tailroom(skb),
1897 				GFP_ATOMIC))) {
1898 				QDF_DEBUG_PANIC(
1899 				   "SKB tailroom is lessthan requested length."
1900 				   " tail-room: %u, len: %u, skb->len: %u",
1901 				   skb_tailroom(skb), len, skb->len);
1902 				__qdf_nbuf_count_dec(skb);
1903 				dev_kfree_skb_any(skb);
1904 			}
1905 		}
1906 		skb_put(skb, (len - skb->len));
1907 	}
1908 }
1909 
1910 /**
1911  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1912  * @skb: Pointer to network buffer
1913  * @protocol: Protocol type
1914  *
1915  * Return: none
1916  */
1917 static inline void
1918 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1919 {
1920 	skb->protocol = protocol;
1921 }
1922 
1923 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1924 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1925 
1926 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1927 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1928 
1929 /**
1930  * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
1931  * @dmaaddr: DMA address
1932  * @lo: low 32-bits of @dmaaddr
1933  * @hi: high 32-bits of @dmaaddr
1934  *
1935  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
1936  *
1937  * Return: N/A
1938  */
1939 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1940 				      uint32_t *lo, uint32_t *hi);
1941 
1942 /**
1943  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
1944  * into segments
1945  * @osdev: qdf device handle
1946  * @skb: network buffer to be segmented
1947  * @tso_info: This is the output. The information about the
1948  *           TSO segments will be populated within this.
1949  *
1950  * This function fragments a TCP jumbo packet into smaller
1951  * segments to be transmitted by the driver. It chains the TSO
1952  * segments created into a list.
1953  *
1954  * Return: number of TSO segments
1955  */
1956 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1957 				 struct qdf_tso_info_t *tso_info);
1958 
1959 /**
1960  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
1961  *
1962  * @osdev: qdf device handle
1963  * @tso_seg: TSO segment element to be unmapped
1964  * @is_last_seg: whether this is last tso seg or not
1965  *
1966  * Return: none
1967  */
1968 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1969 			  struct qdf_tso_seg_elem_t *tso_seg,
1970 			  bool is_last_seg);
1971 
1972 #ifdef FEATURE_TSO
1973 /**
1974  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1975  *                                    payload len
1976  * @skb: buffer
1977  *
1978  * Return: size
1979  */
1980 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1981 
1982 /**
1983  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
1984  *                                into segments
1985  * @skb:   network buffer to be segmented
1986  *
1987  * This function fragments a TCP jumbo packet into smaller
1988  * segments to be transmitted by the driver. It chains the TSO
1989  * segments created into a list.
1990  *
1991  * Return: number of segments
1992  */
1993 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1994 
1995 #else
/* FEATURE_TSO disabled: no TCP payload accounting, report 0 */
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}
2001 
/* FEATURE_TSO disabled: segmentation never happens, report 0 segments */
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}
2006 
2007 #endif /* FEATURE_TSO */
2008 
2009 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
2010 {
2011 	if (skb_is_gso(skb) &&
2012 		(skb_is_gso_v6(skb) ||
2013 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
2014 		return true;
2015 	else
2016 		return false;
2017 }
2018 
2019 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
2020 
2021 int __qdf_nbuf_get_users(struct sk_buff *skb);
2022 
2023 /**
2024  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
2025  *			      and get hw_classify by peeking
2026  *			      into packet
2027  * @skb:		Network buffer (skb on Linux)
2028  * @pkt_type:		Pkt type (from enum htt_pkt_type)
2029  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
2030  *			needs to be set in case of CE classification support
2031  *			Is set by this macro.
2032  * @hw_classify:	This is a flag which is set to indicate
2033  *			CE classification is enabled.
2034  *			Do not set this bit for VLAN packets
2035  *			OR for mcast / bcast frames.
2036  *
2037  * This macro parses the payload to figure out relevant Tx meta-data e.g.
2038  * whether to enable tx_classify bit in CE.
2039  *
2040  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
2041  * If protocol is less than ETH_P_802_3_MIN (0x600), then
2042  * it is the length and a 802.3 frame else it is Ethernet Type II
2043  * (RFC 894).
2044  * Bit 4 in pkt_subtype is the tx_classify bit
2045  *
2046  * Return:	void
2047  */
2048 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
2049 				pkt_subtype, hw_classify)	\
2050 do {								\
2051 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
2052 	uint16_t ether_type = ntohs(eh->h_proto);		\
2053 	bool is_mc_bc;						\
2054 								\
2055 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
2056 		   is_multicast_ether_addr((uint8_t *)eh);	\
2057 								\
2058 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
2059 		hw_classify = 1;				\
2060 		pkt_subtype = 0x01 <<				\
2061 			HTT_TX_CLASSIFY_BIT_S;			\
2062 	}							\
2063 								\
2064 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
2065 		pkt_type = htt_pkt_type_ethernet;		\
2066 								\
2067 } while (0)
2068 
2069 /*
2070  * nbuf private buffer routines
2071  */
2072 
2073 /**
2074  * __qdf_nbuf_peek_header() - return the header's addr & m_len
2075  * @skb: Pointer to network buffer
2076  * @addr: Pointer to store header's addr
2077  * @len: network buffer length
2078  *
2079  * Return: none
2080  */
2081 static inline void
2082 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
2083 {
2084 	*addr = skb->data;
2085 	*len = skb->len;
2086 }
2087 
2088 /**
2089  * typedef __qdf_nbuf_queue_t -  network buffer queue
2090  * @head: Head pointer
2091  * @tail: Tail pointer
2092  * @qlen: Queue length
2093  */
2094 typedef struct __qdf_nbuf_qhead {
2095 	struct sk_buff *head;
2096 	struct sk_buff *tail;
2097 	unsigned int qlen;
2098 } __qdf_nbuf_queue_t;
2099 
2100 /******************Functions *************/
2101 
2102 /**
2103  * __qdf_nbuf_queue_init() - initiallize the queue head
2104  * @qhead: Queue head
2105  *
2106  * Return: QDF status
2107  */
2108 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
2109 {
2110 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
2111 	return QDF_STATUS_SUCCESS;
2112 }
2113 
2114 /**
2115  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
2116  * @qhead: Queue head
2117  * @skb: Pointer to network buffer
2118  *
2119  * This is a lockless version, driver must acquire locks if it
2120  * needs to synchronize
2121  *
2122  * Return: none
2123  */
2124 static inline void
2125 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
2126 {
2127 	skb->next = NULL;       /*Nullify the next ptr */
2128 
2129 	if (!qhead->head)
2130 		qhead->head = skb;
2131 	else
2132 		qhead->tail->next = skb;
2133 
2134 	qhead->tail = skb;
2135 	qhead->qlen++;
2136 }
2137 
2138 /**
2139  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
2140  * @dest: target netbuf queue
2141  * @src:  source netbuf queue
2142  *
2143  * Return: target netbuf queue
2144  */
2145 static inline __qdf_nbuf_queue_t *
2146 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
2147 {
2148 	if (!dest)
2149 		return NULL;
2150 	else if (!src || !(src->head))
2151 		return dest;
2152 
2153 	if (!(dest->head))
2154 		dest->head = src->head;
2155 	else
2156 		dest->tail->next = src->head;
2157 
2158 	dest->tail = src->tail;
2159 	dest->qlen += src->qlen;
2160 	return dest;
2161 }
2162 
2163 /**
2164  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
2165  * @qhead: Queue head
2166  * @skb: Pointer to network buffer
2167  *
2168  * This is a lockless version, driver must acquire locks if it needs to
2169  * synchronize
2170  *
2171  * Return: none
2172  */
2173 static inline void
2174 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
2175 {
2176 	if (!qhead->head) {
2177 		/*Empty queue Tail pointer Must be updated */
2178 		qhead->tail = skb;
2179 	}
2180 	skb->next = qhead->head;
2181 	qhead->head = skb;
2182 	qhead->qlen++;
2183 }
2184 
2185 /**
2186  * __qdf_nbuf_queue_remove_last() - remove a skb from the tail of the queue
2187  * @qhead: Queue head
2188  *
2189  * This is a lockless version. Driver should take care of the locks
2190  *
2191  * Return: skb or NULL
2192  */
2193 static inline struct sk_buff *
2194 __qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
2195 {
2196 	__qdf_nbuf_t tmp_tail, node = NULL;
2197 
2198 	if (qhead->head) {
2199 		qhead->qlen--;
2200 		tmp_tail = qhead->tail;
2201 		node = qhead->head;
2202 		if (qhead->head == qhead->tail) {
2203 			qhead->head = NULL;
2204 			qhead->tail = NULL;
2205 			return node;
2206 		} else {
2207 			while (tmp_tail != node->next)
2208 			       node = node->next;
2209 			qhead->tail = node;
2210 			return node->next;
2211 		}
2212 	}
2213 	return node;
2214 }
2215 
2216 /**
2217  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
2218  * @qhead: Queue head
2219  *
2220  * This is a lockless version. Driver should take care of the locks
2221  *
2222  * Return: skb or NULL
2223  */
2224 static inline
2225 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
2226 {
2227 	__qdf_nbuf_t tmp = NULL;
2228 
2229 	if (qhead->head) {
2230 		qhead->qlen--;
2231 		tmp = qhead->head;
2232 		if (qhead->head == qhead->tail) {
2233 			qhead->head = NULL;
2234 			qhead->tail = NULL;
2235 		} else {
2236 			qhead->head = tmp->next;
2237 		}
2238 		tmp->next = NULL;
2239 	}
2240 	return tmp;
2241 }
2242 
2243 /**
2244  * __qdf_nbuf_queue_first() - returns the first skb in the queue
2245  * @qhead: head of queue
2246  *
2247  * Return: NULL if the queue is empty
2248  */
2249 static inline struct sk_buff *
2250 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
2251 {
2252 	return qhead->head;
2253 }
2254 
2255 /**
2256  * __qdf_nbuf_queue_last() - returns the last skb in the queue
2257  * @qhead: head of queue
2258  *
2259  * Return: NULL if the queue is empty
2260  */
2261 static inline struct sk_buff *
2262 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
2263 {
2264 	return qhead->tail;
2265 }
2266 
2267 /**
2268  * __qdf_nbuf_queue_len() - return the queue length
2269  * @qhead: Queue head
2270  *
2271  * Return: Queue length
2272  */
2273 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
2274 {
2275 	return qhead->qlen;
2276 }
2277 
2278 /**
2279  * __qdf_nbuf_queue_next() - return the next skb from packet chain
2280  * @skb: Pointer to network buffer
2281  *
2282  * This API returns the next skb from packet chain, remember the skb is
2283  * still in the queue
2284  *
2285  * Return: NULL if no packets are there
2286  */
2287 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
2288 {
2289 	return skb->next;
2290 }
2291 
2292 /**
2293  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
2294  * @qhead: Queue head
2295  *
2296  * Return: true if length is 0 else false
2297  */
2298 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
2299 {
2300 	return qhead->qlen == 0;
2301 }
2302 
2303 /*
2304  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
2305  * Because the queue head will most likely put in some structure,
2306  * we don't use pointer type as the definition.
2307  */
2308 
2309 /*
2310  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
2311  * Because the queue head will most likely put in some structure,
2312  * we don't use pointer type as the definition.
2313  */
2314 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag (no-op)
 * @skb: network buffer
 * @flag: requested flag value (ignored)
 *
 * Intentionally empty on Linux; provided so common code can call it
 * unconditionally across OS abstraction layers.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
2319 
2320 /**
2321  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
2322  *        expands the headroom
2323  *        in the data region. In case of failure the skb is released.
2324  * @skb: sk buff
2325  * @headroom: size of headroom
2326  *
2327  * Return: skb or NULL
2328  */
2329 static inline struct sk_buff *
2330 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
2331 {
2332 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
2333 		__qdf_nbuf_count_dec(skb);
2334 		dev_kfree_skb_any(skb);
2335 		skb = NULL;
2336 	}
2337 	return skb;
2338 }
2339 
2340 /**
2341  * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
2342  *        exapnds the tailroom
2343  *        in data region. In case of failure it releases the skb.
2344  * @skb: sk buff
2345  * @tailroom: size of tailroom
2346  *
2347  * Return: skb or NULL
2348  */
2349 static inline struct sk_buff *
2350 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
2351 {
2352 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
2353 		return skb;
2354 	/**
2355 	 * unlikely path
2356 	 */
2357 	__qdf_nbuf_count_dec(skb);
2358 	dev_kfree_skb_any(skb);
2359 	return NULL;
2360 }
2361 
2362 /**
2363  * __qdf_nbuf_linearize() - skb linearize
2364  * @skb: sk buff
2365  *
2366  * create a version of the specified nbuf whose contents
2367  * can be safely modified without affecting other
2368  * users.If the nbuf is non-linear then this function
2369  * linearize. if unable to linearize returns -ENOMEM on
2370  * success 0 is returned
2371  *
2372  * Return: 0 on Success, -ENOMEM on failure is returned.
2373  */
2374 static inline int
2375 __qdf_nbuf_linearize(struct sk_buff *skb)
2376 {
2377 	return skb_linearize(skb);
2378 }
2379 
2380 /**
2381  * __qdf_nbuf_unshare() - skb unshare
2382  * @skb: sk buff
2383  *
2384  * create a version of the specified nbuf whose contents
2385  * can be safely modified without affecting other
2386  * users.If the nbuf is a clone then this function
2387  * creates a new copy of the data. If the buffer is not
2388  * a clone the original buffer is returned.
2389  *
2390  * Return: skb or NULL
2391  */
2392 static inline struct sk_buff *
2393 __qdf_nbuf_unshare(struct sk_buff *skb)
2394 {
2395 	struct sk_buff *skb_new;
2396 
2397 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
2398 
2399 	skb_new = skb_unshare(skb, GFP_ATOMIC);
2400 	if (skb_new)
2401 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
2402 
2403 	return skb_new;
2404 }
2405 
2406 /**
2407  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
2408  * @skb: sk buff
2409  *
2410  * Return: true/false
2411  */
2412 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
2413 {
2414 	return skb_cloned(skb);
2415 }
2416 
2417 /**
2418  * __qdf_nbuf_pool_init() - init pool
2419  * @net: net handle
2420  *
2421  * Return: QDF status
2422  */
2423 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
2424 {
2425 	return QDF_STATUS_SUCCESS;
2426 }
2427 
2428 /*
2429  * adf_nbuf_pool_delete() implementation - do nothing in linux
2430  */
2431 #define __qdf_nbuf_pool_delete(osdev)
2432 
2433 /**
2434  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2435  *        release the skb.
2436  * @skb: sk buff
2437  * @headroom: size of headroom
2438  * @tailroom: size of tailroom
2439  *
2440  * Return: skb or NULL
2441  */
2442 static inline struct sk_buff *
2443 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2444 {
2445 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2446 		return skb;
2447 
2448 	__qdf_nbuf_count_dec(skb);
2449 	dev_kfree_skb_any(skb);
2450 	return NULL;
2451 }
2452 
2453 /**
2454  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2455  * @buf: Network buf instance
2456  * @headroom: Additional headroom to be added
2457  * @tailroom: Additional tailroom to be added
2458  *
2459  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2460  *	or NULL if there is no memory
2461  */
2462 static inline struct sk_buff *
2463 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2464 {
2465 	struct sk_buff *copy;
2466 	copy = skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2467 	if (copy)
2468 		__qdf_nbuf_count_inc(copy);
2469 
2470 	return copy;
2471 }
2472 
2473 /**
2474  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2475  * @buf: Network buf instance
2476  *
2477  * Return: True, if buf has frag_list else return False
2478  */
2479 static inline bool
2480 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2481 {
2482 	return skb_has_frag_list(buf);
2483 }
2484 
2485 /**
2486  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2487  * @buf: Network buf instance
2488  *
2489  * Return: Network buf instance
2490  */
2491 static inline struct sk_buff *
2492 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2493 {
2494 	struct sk_buff *list;
2495 
2496 	if (!__qdf_nbuf_has_fraglist(buf))
2497 		return NULL;
2498 
2499 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2500 		;
2501 
2502 	return list;
2503 }
2504 
2505 /**
2506  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2507  * @buf: Network buf instance
2508  *
2509  * Return: void
2510  */
2511 static inline void
2512 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2513 {
2514 	struct sk_buff *list;
2515 
2516 	skb_walk_frags(buf, list)
2517 		skb_get(list);
2518 }
2519 
2520 /**
2521  * __qdf_nbuf_tx_cksum_info() - tx checksum info
2522  * @skb: Network buffer
2523  * @hdr_off:
2524  * @where:
2525  *
2526  * Return: true/false
2527  */
2528 static inline bool
2529 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2530 			 uint8_t **where)
2531 {
2532 	qdf_assert(0);
2533 	return false;
2534 }
2535 
2536 /**
2537  * __qdf_nbuf_reset_ctxt() - mem zero control block
2538  * @nbuf: buffer
2539  *
2540  * Return: none
2541  */
2542 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2543 {
2544 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2545 }
2546 
2547 /**
2548  * __qdf_nbuf_network_header() - get network header
2549  * @buf: buffer
2550  *
2551  * Return: network header pointer
2552  */
2553 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2554 {
2555 	return skb_network_header(buf);
2556 }
2557 
2558 /**
2559  * __qdf_nbuf_transport_header() - get transport header
2560  * @buf: buffer
2561  *
2562  * Return: transport header pointer
2563  */
2564 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2565 {
2566 	return skb_transport_header(buf);
2567 }
2568 
2569 /**
2570  *  __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
2571  *  passed as part of network buffer by network stack
2572  * @skb: sk buff
2573  *
2574  * Return: TCP MSS size
2575  *
2576  */
2577 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2578 {
2579 	return skb_shinfo(skb)->gso_size;
2580 }
2581 
2582 /**
2583  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2584  * @nbuf: sk buff
2585  *
2586  * Return: none
2587  */
2588 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2589 
2590 /**
2591  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2592  * @nbuf: sk buff
2593  *
2594  * Return: void ptr
2595  */
2596 static inline void *
2597 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2598 {
2599 	return (void *)nbuf->cb;
2600 }
2601 
2602 /**
2603  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2604  * @skb: sk buff
2605  *
2606  * Return: head size
2607  */
2608 static inline size_t
2609 __qdf_nbuf_headlen(struct sk_buff *skb)
2610 {
2611 	return skb_headlen(skb);
2612 }
2613 
2614 /**
2615  * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not.
2616  * @skb: sk buff
2617  *
2618  * Return: true/false
2619  */
2620 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2621 {
2622 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2623 }
2624 
2625 /**
2626  * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not.
2627  * @skb: sk buff
2628  *
2629  * Return: true/false
2630  */
2631 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2632 {
2633 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2634 }
2635 
2636 /**
2637  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2638  * @skb: sk buff
2639  *
2640  * Return: size of l2+l3+l4 header length
2641  */
2642 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2643 {
2644 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2645 }
2646 
2647 /**
2648  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2649  * @skb: sk buff
2650  *
2651  * Return: size of TCP header length
2652  */
2653 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2654 {
2655 	return tcp_hdrlen(skb);
2656 }
2657 
2658 /**
2659  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2660  * @skb: sk buff
2661  *
2662  * Return:  true/false
2663  */
2664 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2665 {
2666 	if (skb_is_nonlinear(skb))
2667 		return true;
2668 	else
2669 		return false;
2670 }
2671 
2672 /**
2673  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the  skb
2674  * @skb: sk buff
2675  *
2676  * Return: TCP sequence number
2677  */
2678 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2679 {
2680 	return ntohl(tcp_hdr(skb)->seq);
2681 }
2682 
2683 /**
2684  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf'f private space
2685  *@skb: sk buff
2686  *
2687  * Return: data pointer to typecast into your priv structure
2688  */
2689 static inline char *
2690 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2691 {
2692 	return &skb->cb[8];
2693 }
2694 
2695 /**
2696  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2697  * @buf: Pointer to nbuf
2698  *
2699  * Return: None
2700  */
2701 static inline void
2702 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2703 {
2704 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2705 }
2706 
2707 /**
2708  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2709  *
2710  * @skb: sk buff
2711  * @queue_id: Queue id
2712  *
2713  * Return: void
2714  */
2715 static inline void
2716 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2717 {
2718 	skb_record_rx_queue(skb, queue_id);
2719 }
2720 
2721 /**
2722  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2723  *
2724  * @skb: sk buff
2725  *
2726  * Return: Queue mapping
2727  */
2728 static inline uint16_t
2729 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2730 {
2731 	return skb->queue_mapping;
2732 }
2733 
2734 /**
2735  * __qdf_nbuf_set_queue_mapping() - get the queue mapping set by linux kernel
2736  *
2737  * @skb: sk buff
2738  * @val: queue_id
2739  *
2740  */
2741 static inline void
2742 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2743 {
2744 	skb_set_queue_mapping(skb, val);
2745 }
2746 
2747 /**
2748  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2749  *
2750  * @skb: sk buff
2751  *
2752  * Return: void
2753  */
2754 static inline void
2755 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2756 {
2757 	__net_timestamp(skb);
2758 }
2759 
2760 /**
2761  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2762  *
2763  * @skb: sk buff
2764  *
2765  * Return: timestamp stored in skb in ms
2766  */
2767 static inline uint64_t
2768 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2769 {
2770 	return ktime_to_ms(skb_get_ktime(skb));
2771 }
2772 
2773 /**
2774  * __qdf_nbuf_get_timestamp_us() - get the timestamp for frame
2775  *
2776  * @skb: sk buff
2777  *
2778  * Return: timestamp stored in skb in us
2779  */
2780 static inline uint64_t
2781 __qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
2782 {
2783 	return ktime_to_us(skb_get_ktime(skb));
2784 }
2785 
2786 /**
2787  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2788  *
2789  * @skb: sk buff
2790  *
2791  * Return: time difference in ms
2792  */
2793 static inline uint64_t
2794 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2795 {
2796 	return ktime_to_ms(net_timedelta(skb->tstamp));
2797 }
2798 
2799 /**
2800  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2801  *
2802  * @skb: sk buff
2803  *
2804  * Return: time difference in micro seconds
2805  */
2806 static inline uint64_t
2807 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2808 {
2809 	return ktime_to_us(net_timedelta(skb->tstamp));
2810 }
2811 
2812 /**
2813  * __qdf_nbuf_orphan() - orphan a nbuf
2814  * @skb: sk buff
2815  *
2816  * If a buffer currently has an owner then we call the
2817  * owner's destructor function
2818  *
2819  * Return: void
2820  */
2821 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2822 {
2823 	return skb_orphan(skb);
2824 }
2825 
2826 /**
2827  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2828  * head pointer to end pointer
2829  * @nbuf: qdf_nbuf_t
2830  *
2831  * Return: size of network buffer from head pointer to end
2832  * pointer
2833  */
2834 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2835 {
2836 	return skb_end_offset(nbuf);
2837 }
2838 
2839 /**
2840  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2841  * including the header and variable data area
2842  * @skb: sk buff
2843  *
2844  * Return: size of network buffer
2845  */
2846 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2847 {
2848 	return skb->truesize;
2849 }
2850 
2851 /**
2852  * __qdf_nbuf_get_allocsize() - Return the actual size of the skb->head
2853  * excluding the header and variable data area
2854  * @skb: sk buff
2855  *
2856  * Return: actual allocated size of network buffer
2857  */
2858 static inline unsigned int __qdf_nbuf_get_allocsize(struct sk_buff *skb)
2859 {
2860 	return SKB_WITH_OVERHEAD(skb->truesize) -
2861 		SKB_DATA_ALIGN(sizeof(struct sk_buff));
2862 }
2863 
2864 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2865 /**
2866  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2867  * from the total skb mem and DP tx/rx skb mem
2868  * @nbytes: number of bytes
2869  * @dir: direction
2870  * @is_mapped: is mapped or unmapped memory
2871  *
2872  * Return: none
2873  */
2874 static inline void __qdf_record_nbuf_nbytes(
2875 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2876 {
2877 	if (is_mapped) {
2878 		if (dir == QDF_DMA_TO_DEVICE) {
2879 			qdf_mem_dp_tx_skb_cnt_inc();
2880 			qdf_mem_dp_tx_skb_inc(nbytes);
2881 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2882 			qdf_mem_dp_rx_skb_cnt_inc();
2883 			qdf_mem_dp_rx_skb_inc(nbytes);
2884 		}
2885 		qdf_mem_skb_total_inc(nbytes);
2886 	} else {
2887 		if (dir == QDF_DMA_TO_DEVICE) {
2888 			qdf_mem_dp_tx_skb_cnt_dec();
2889 			qdf_mem_dp_tx_skb_dec(nbytes);
2890 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2891 			qdf_mem_dp_rx_skb_cnt_dec();
2892 			qdf_mem_dp_rx_skb_dec(nbytes);
2893 		}
2894 		qdf_mem_skb_total_dec(nbytes);
2895 	}
2896 }
2897 
2898 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
/**
 * __qdf_record_nbuf_nbytes() - stub when CONFIG_WLAN_SYSFS_MEM_STATS is
 * disabled; skb memory accounting compiles out to a no-op.
 * @nbytes: number of bytes (ignored)
 * @dir: DMA direction (ignored)
 * @is_mapped: map vs unmap (ignored)
 *
 * Return: none
 */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
}
2903 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2904 
/**
 * __qdf_nbuf_queue_head_dequeue() - pop the first skb off an sk_buff_head
 * @skb_queue_head: kernel skb list to dequeue from (locked internally)
 *
 * Return: dequeued skb, or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	struct sk_buff *skb = skb_dequeue(skb_queue_head);

	return skb;
}
2910 
2911 static inline
2912 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2913 {
2914 	return skb_queue_head->qlen;
2915 }
2916 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - append an skb to an sk_buff_head
 * @skb_queue_head: kernel skb list (locked internally by skb_queue_tail)
 * @skb: buffer to append
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	/*
	 * Fix: dropped 'return' -- skb_queue_tail() returns void and
	 * 'return <void expression>;' is non-standard C.
	 */
	skb_queue_tail(skb_queue_head, skb);
}
2923 
/**
 * __qdf_nbuf_queue_head_init() - initialize an sk_buff_head (lock + links)
 * @skb_queue_head: kernel skb list to initialize
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	/*
	 * Fix: dropped 'return' -- skb_queue_head_init() returns void and
	 * 'return <void expression>;' is non-standard C.
	 */
	skb_queue_head_init(skb_queue_head);
}
2929 
/**
 * __qdf_nbuf_queue_head_purge() - free every skb on an sk_buff_head
 * @skb_queue_head: kernel skb list to empty
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	/*
	 * Fix: dropped 'return' -- skb_queue_purge() returns void and
	 * 'return <void expression>;' is non-standard C.
	 */
	skb_queue_purge(skb_queue_head);
}
2935 
2936 static inline
2937 int __qdf_nbuf_queue_empty(__qdf_nbuf_queue_head_t *nbuf_queue_head)
2938 {
2939 	return skb_queue_empty(nbuf_queue_head);
2940 }
2941 
2942 /**
2943  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
2944  * @skb_queue_head: skb list for which lock is to be acquired
2945  *
2946  * Return: void
2947  */
2948 static inline
2949 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2950 {
2951 	spin_lock_bh(&skb_queue_head->lock);
2952 }
2953 
2954 /**
2955  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
2956  * @skb_queue_head: skb list for which lock is to be release
2957  *
2958  * Return: void
2959  */
2960 static inline
2961 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2962 {
2963 	spin_unlock_bh(&skb_queue_head->lock);
2964 }
2965 
2966 /**
2967  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2968  * @nbuf: qdf_nbuf_t
2969  * @idx: Index for which frag size is requested
2970  *
2971  * Return: Frag size
2972  */
2973 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2974 							   uint8_t idx)
2975 {
2976 	unsigned int size = 0;
2977 
2978 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2979 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2980 	return size;
2981 }
2982 
2983 /**
2984  * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
2985  * @nbuf: qdf_nbuf_t
2986  * @idx: Index for which frag address is requested
2987  *
2988  * Return: Frag address in success, else NULL
2989  */
2990 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2991 						    uint8_t idx)
2992 {
2993 	__qdf_frag_t frag_addr = NULL;
2994 
2995 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2996 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2997 	return frag_addr;
2998 }
2999 
3000 /**
3001  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
3002  * @nbuf: qdf_nbuf_t
3003  * @idx: Frag index
3004  * @size: Size by which frag_size needs to be increased/decreased
3005  *        +Ve means increase, -Ve means decrease
3006  * @truesize: truesize
3007  */
3008 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
3009 						 int size,
3010 						 unsigned int truesize)
3011 {
3012 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
3013 }
3014 
3015 /**
3016  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
3017  *          and adjust length by size.
3018  * @nbuf: qdf_nbuf_t
3019  * @idx: Frag index
3020  * @offset: Frag page offset should be moved by offset.
3021  *      +Ve - Move offset forward.
3022  *      -Ve - Move offset backward.
3023  *
3024  * Return: QDF_STATUS
3025  */
3026 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
3027 					    int offset);
3028 
3029 /**
3030  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
3031  * @nbuf: nbuf pointer
3032  * @idx: frag idx need to be removed
3033  * @truesize: truesize of frag
3034  *
3035  * Return : void
3036  */
3037 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);
3038 /**
3039  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
3040  * @buf: Frag pointer needs to be added in nbuf frag
3041  * @nbuf: qdf_nbuf_t where frag will be added
3042  * @offset: Offset in frag to be added to nbuf_frags
3043  * @frag_len: Frag length
3044  * @truesize: truesize
3045  * @take_frag_ref: Whether to take ref for frag or not
3046  *      This bool must be set as per below comdition:
3047  *      1. False: If this frag is being added in any nbuf
3048  *              for the first time after allocation.
3049  *      2. True: If frag is already attached part of any
3050  *              nbuf.
3051  *
3052  * It takes ref_count based on boolean flag take_frag_ref
3053  */
3054 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
3055 			    int offset, int frag_len,
3056 			    unsigned int truesize, bool take_frag_ref);
3057 
3058 /**
3059  * __qdf_nbuf_ref_frag() - get frag reference
3060  * @buf: Pointer to nbuf
3061  *
3062  * Return: void
3063  */
3064 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
3065 
3066 /**
3067  * __qdf_nbuf_set_mark() - Set nbuf mark
3068  * @buf: Pointer to nbuf
3069  * @mark: Value to set mark
3070  *
3071  * Return: None
3072  */
3073 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
3074 {
3075 	buf->mark = mark;
3076 }
3077 
3078 /**
3079  * __qdf_nbuf_get_mark() - Get nbuf mark
3080  * @buf: Pointer to nbuf
3081  *
3082  * Return: Value of mark
3083  */
3084 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
3085 {
3086 	return buf->mark;
3087 }
3088 
3089 /**
3090  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
3091  * the data pointer to the end pointer
3092  * @nbuf: qdf_nbuf_t
3093  *
3094  * Return: size of skb from data pointer to end pointer
3095  */
3096 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
3097 {
3098 	return (skb_end_pointer(nbuf) - nbuf->data);
3099 }
3100 
3101 /**
3102  * __qdf_nbuf_set_data_len() - Return the data_len of the nbuf
3103  * @nbuf: qdf_nbuf_t
3104  * @len: data_len to be set
3105  *
3106  * Return: value of data_len
3107  */
3108 static inline
3109 qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
3110 {
3111 	return nbuf->data_len = len;
3112 }
3113 
3114 /**
3115  * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
3116  * @nbuf: qdf_nbuf_t
3117  *
3118  * Return: value of data_len
3119  */
3120 static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
3121 {
3122 	return nbuf->data_len;
3123 }
3124 
3125 /**
3126  * __qdf_nbuf_set_hash() - set the hash of the buf
3127  * @buf: Network buf instance
3128  * @len: len to be set
3129  *
3130  * Return: None
3131  */
3132 static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
3133 {
3134 	buf->hash = len;
3135 }
3136 
3137 /**
3138  * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
3139  * @buf: Network buf instance
3140  * @len: len to be set
3141  *
3142  * Return: None
3143  */
3144 static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
3145 {
3146 	buf->sw_hash = len;
3147 }
3148 
3149 /**
3150  * __qdf_nbuf_set_csum_start() - set the csum start of the buf
3151  * @buf: Network buf instance
3152  * @len: len to be set
3153  *
3154  * Return: None
3155  */
3156 static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
3157 {
3158 	buf->csum_start = len;
3159 }
3160 
3161 /**
3162  * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
3163  * @buf: Network buf instance
3164  * @len: len to be set
3165  *
3166  * Return: None
3167  */
3168 static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
3169 {
3170 	buf->csum_offset = len;
3171 }
3172 
3173 /**
3174  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
3175  * @skb: Pointer to network buffer
3176  *
3177  * Return: Return the number of gso segments
3178  */
3179 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
3180 {
3181 	return skb_shinfo(skb)->gso_segs;
3182 }
3183 
3184 /**
3185  * __qdf_nbuf_set_gso_segs() - set the number of gso segments
3186  * @skb: Pointer to network buffer
3187  * @val: val to be set
3188  *
3189  * Return: None
3190  */
3191 static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
3192 {
3193 	skb_shinfo(skb)->gso_segs = val;
3194 }
3195 
3196 /**
3197  * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
3198  * @skb: Pointer to network buffer
3199  *
3200  * Return: None
3201  */
3202 static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
3203 {
3204 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3205 }
3206 
3207 /**
3208  * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
3209  * @skb: Pointer to network buffer
3210  *
3211  * Return: None
3212  */
3213 static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
3214 {
3215 	skb->ip_summed = CHECKSUM_PARTIAL;
3216 }
3217 
3218 /**
3219  * __qdf_nbuf_get_gso_size() - Return the number of gso size
3220  * @skb: Pointer to network buffer
3221  *
3222  * Return: Return the number of gso segments
3223  */
3224 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
3225 {
3226 	return skb_shinfo(skb)->gso_size;
3227 }
3228 
3229 /**
3230  * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
3231  * @skb: Pointer to network buffer
3232  * @val: the number of GSO segments
3233  *
3234  * Return: None
3235  */
3236 static inline void
3237 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
3238 {
3239 	skb_shinfo(skb)->gso_size = val;
3240 }
3241 
3242 /**
3243  * __qdf_nbuf_kfree() - Free nbuf using kfree
3244  * @skb: Pointer to network buffer
3245  *
3246  * This function is called to free the skb on failure cases
3247  *
3248  * Return: None
3249  */
3250 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
3251 {
3252 	kfree_skb(skb);
3253 }
3254 
3255 /**
3256  * __qdf_nbuf_dev_kfree_list() - Free nbuf list using dev based os call
3257  * @nbuf_queue_head: Pointer to nbuf queue head
3258  *
3259  * This function is called to free the nbuf list on failure cases
3260  *
3261  * Return: None
3262  */
3263 void
3264 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head);
3265 
3266 /**
3267  * __qdf_nbuf_dev_queue_head() - queue a buffer using dev at the list head
3268  * @nbuf_queue_head: Pointer to skb list head
3269  * @buff: Pointer to nbuf
3270  *
3271  * This function is called to queue buffer at the skb list head
3272  *
3273  * Return: None
3274  */
3275 static inline void
3276 __qdf_nbuf_dev_queue_head(__qdf_nbuf_queue_head_t *nbuf_queue_head,
3277 			  __qdf_nbuf_t buff)
3278 {
3279 	 __skb_queue_head(nbuf_queue_head, buff);
3280 }
3281 
3282 /**
3283  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
3284  * @skb: Pointer to network buffer
3285  *
3286  * This function is called to free the skb on failure cases
3287  *
3288  * Return: None
3289  */
3290 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
3291 {
3292 	dev_kfree_skb(skb);
3293 }
3294 
3295 /**
3296  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
3297  * @skb: Network buffer
3298  *
3299  * Return: TRUE if skb pkt type is mcast
3300  *         FALSE if not
3301  */
3302 static inline
3303 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
3304 {
3305 	return skb->pkt_type == PACKET_MULTICAST;
3306 }
3307 
3308 /**
3309  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
3310  * @skb: Network buffer
3311  *
3312  * Return: TRUE if skb pkt type is mcast
3313  *         FALSE if not
3314  */
3315 static inline
3316 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
3317 {
3318 	return skb->pkt_type == PACKET_BROADCAST;
3319 }
3320 
3321 /**
3322  * __qdf_nbuf_set_dev() - set dev of network buffer
3323  * @skb: Pointer to network buffer
3324  * @dev: value to be set in dev of network buffer
3325  *
3326  * Return: void
3327  */
3328 static inline
3329 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
3330 {
3331 	skb->dev = dev;
3332 }
3333 
3334 /**
3335  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
3336  * @skb: Pointer to network buffer
3337  *
3338  * Return: dev mtu value in nbuf
3339  */
3340 static inline
3341 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
3342 {
3343 	return skb->dev->mtu;
3344 }
3345 
3346 /**
3347  * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using eth trans
3348  *                                            os API
3349  * @skb: Pointer to network buffer
3350  *
3351  * Return: None
3352  */
3353 static inline
3354 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
3355 {
3356 	skb->protocol = eth_type_trans(skb, skb->dev);
3357 }
3358 
3359 /**
3360  * __qdf_nbuf_net_timedelta() - get time delta
3361  * @t: time as __qdf_ktime_t object
3362  *
3363  * Return: time delta as ktime_t object
3364  */
3365 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
3366 {
3367 	return net_timedelta(t);
3368 }
3369 
3370 #ifdef CONFIG_NBUF_AP_PLATFORM
3371 #include <i_qdf_nbuf_w.h>
3372 #else
3373 #include <i_qdf_nbuf_m.h>
3374 #endif
3375 #endif /* _I_QDF_NBUF_H */
3376