xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision 70a19e16789e308182f63b15c75decec7bf0b342)
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* Since commit
 *  baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
 *
 * the function netif_rx() can be used in preemptible/thread context as
 * well as in interrupt context.
 *
 * Use netif_rx().
 */
#define netif_rx_ni(skb) netif_rx(skb)
#endif
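
/*
 * Illustrative sketch (not part of this header): with the mapping above,
 * driver code can keep calling netif_rx_ni() on any kernel version; on
 * 5.18+ it simply resolves to netif_rx(). The rx delivery function below
 * is a hypothetical example, not an API defined here.
 *
 *	static void example_deliver_to_stack(struct sk_buff *skb,
 *					     struct net_device *dev)
 *	{
 *		skb->dev = dev;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx_ni(skb);
 *	}
 */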

/*
 * Use the Linux socket buffer (struct sk_buff) as the underlying
 * implementation of the network buffer. Linux uses sk_buff to represent
 * both the packet data and its metadata, so an nbuf maps directly to an
 * sk_buff pointer.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

/**
 * typedef __qdf_nbuf_shared_info_t - abstraction for skb_shared_info
 * linux struct
 *
 * This is used for skb shared info via linux skb shinfo APIs
 */
typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
#define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
#define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

#define IEEE80211_RADIOTAP_EXT1_USIG	1
#define IEEE80211_RADIOTAP_EXT1_EHT	2

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/* TCP Related MASK */
#define QDF_NBUF_PKT_TCPOP_FIN			0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
#define QDF_NBUF_PKT_TCPOP_RST			0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_idx_invalid: flow entry is not found
 * @rx.dev.priv_cb_w.flow_idx_timeout: flow entry search timed out
 * @rx.dev.priv_cb_w.rsvd: reserved bits
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *					     sw exception bit from ring desc
 * @rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			   (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.is_critical: flag indicating a critical frame
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			   (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag (packet is private)
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: flag to indicate this packet is sent to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                       (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint8_t protocol_tag;
					uint8_t flow_idx_invalid: 1,
						flow_idx_timeout: 1,
						rsvd:6;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 lmac_id:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					/* used only for hl */
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
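
/*
 * Illustrative sketch (not part of this header): because the accessors
 * below expand to plain struct-member expressions, the same macro works
 * for both reads and writes. The function and values here are
 * hypothetical.
 *
 *	static void example_cb_usage(struct sk_buff *skb)
 *	{
 *		QDF_NBUF_CB_RX_CTX_ID(skb) = 2;		// "set" (L-value)
 *		if (QDF_NBUF_CB_RX_CTX_ID(skb) == 2)	// "get" (R-value)
 *			QDF_NBUF_CB_RX_DP_TRACE(skb) = 1;
 *	}
 */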

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)

#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/* end of nbuf->cb access macros */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
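
/*
 * Illustrative sketch (not part of this header): a tx path that records
 * its HTC/HTT descriptor as the single extra fragment. The descriptor
 * variables are hypothetical. The cb tracks at most one extra fragment
 * (flags.bits.num is a single bit), so after the push
 * __qdf_nbuf_get_num_frags() reports 2: the extra fragment plus the skb
 * data itself.
 *
 *	static void example_push_tx_desc(struct sk_buff *skb, void *desc_va,
 *					 qdf_dma_addr_t desc_pa,
 *					 uint16_t desc_len)
 *	{
 *		__qdf_nbuf_frag_push_head(skb, desc_len, desc_va, desc_pa);
 *	}
 */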

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reservation is done after the alignment, an
 * unaligned reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
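
/*
 * Illustrative sketch (not part of this header): a typical allocation of
 * a 4-byte-aligned rx buffer with headroom for a driver header. The sizes
 * and the function name are hypothetical; __func__ and __LINE__ identify
 * the call site for the nbuf debug bookkeeping.
 *
 *	static __qdf_nbuf_t example_alloc_rx_buf(__qdf_device_t osdev)
 *	{
 *		return __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					__func__, __LINE__);
 *	}
 */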

__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);

#if defined(QCA_DP_NBUF_FAST_PPEDS)
/**
 * __qdf_nbuf_alloc_ppe_ds() - Allocates nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf for the wifi module in DS mode using the
 * __netdev_alloc_skb_no_skb_reset API. That netdev API invokes
 * skb_recycler_alloc with reset_skb set to false, so the recycler pool
 * does not reset the skb struct when it hands a DS buffer back to the
 * DS module, which helps to improve performance.
 *
 * Return: nbuf or %NULL if no memory
 */

__qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);
#endif /* QCA_DP_NBUF_FAST_PPEDS */

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf, aligns it if needed, and reserves some
 * headroom space after the alignment. The nbuf is not allocated from the
 * skb recycler pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could check whether the caller is in
 * interrupt context and only then use it, falling back to GFP_KERNEL in
 * the normal case, e.g. using "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
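
/*
 * Illustrative sketch (not part of this header) of the GFP selection the
 * note above describes; per that note, the implementation in qdf_nbuf.c
 * currently just uses GFP_ATOMIC.
 *
 *	static struct sk_buff *example_clone(struct sk_buff *skb)
 *	{
 *		gfp_t gfp = (in_irq() || irqs_disabled()) ?
 *				GFP_ATOMIC : GFP_KERNEL;
 *
 *		return skb_clone(skb, gfp);
 *	}
 */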

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf);
bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb; if there is an
 * extra fragment, the total length including that fragment is returned.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the data of the new buf (src) is copied into the
 * tail of the older one (dst). It is the caller's responsibility to
 * free the src skb.
 *
 * Return: QDF_STATUS of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
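
/*
 * Illustrative sketch (not part of this header): concatenating a second
 * buffer into the first and then freeing the source, as the note above
 * requires. The wrapper name is hypothetical.
 *
 *	static QDF_STATUS example_cat(struct sk_buff *dst, struct sk_buff *src)
 *	{
 *		QDF_STATUS status = __qdf_nbuf_cat(dst, src);
 *
 *		if (QDF_IS_STATUS_SUCCESS(status))
 *			dev_kfree_skb_any(src);	// src data now lives in dst
 *		return status;
 *	}
 */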

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
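
/*
 * Illustrative sketch (not part of this header): appending a trailer to
 * an skb. Note that on failure __qdf_nbuf_put_tail() has already freed
 * the skb, so the caller must not touch it again. The trailer contents
 * and the function name are hypothetical.
 *
 *	static bool example_append_trailer(struct sk_buff *skb)
 *	{
 *		uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *		if (!tail)
 *			return false;	// skb was freed internally
 *		memset(tail, 0xab, 4);
 *		return true;
 *	}
 */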

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
 * @skb: sk buff
 *
 * This API returns a total number of fragments from the fraglist
 * Return: total number of fragments
 */
static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
{
	uint32_t num_frag = 0;
	struct sk_buff *list = NULL;

	num_frag = skb_shinfo(skb)->nr_frags;
	skb_walk_frags(skb, list)
		num_frag += skb_shinfo(list)->nr_frags;

	return num_frag;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb, the skb returned is completely
 * modifiable by callers
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}

/**
 * __qdf_nbuf_get_shinfo() - return the shared info of the skb
 * @head_buf: Pointer to network buffer
 *
 * Return: skb shared info from head buf
 */
static inline
struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
{
	return skb_shinfo(head_buf);
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode amsdu packet reception, where segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				   "SKB tailroom is less than requested length."
				   " tail-room: %u, len: %u, skb->len: %u",
				   skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

1662 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1663 {
1664 	if (skb_is_gso(skb) &&
1665 		(skb_is_gso_v6(skb) ||
1666 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1667 		return true;
1668 	else
1669 		return false;
1670 }
1671 
1672 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1673 
1674 int __qdf_nbuf_get_users(struct sk_buff *skb);
1675 
1676 /**
 * __qdf_nbuf_tx_info_get() - modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into the packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in the HTT descriptor
 *			needs to be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	Flag which is set to indicate that
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			or for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in the CE.
 *
 * It overrides pkt_type only if required, for 802.3 frames (original
 * Ethernet): if the protocol field is less than ETH_P_802_3_MIN (0x600),
 * the field is a length and the frame is 802.3; otherwise it is
 * Ethernet Type II (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
1698  *
1699  * Return:	void
1700  */
1701 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1702 				pkt_subtype, hw_classify)	\
1703 do {								\
1704 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1705 	uint16_t ether_type = ntohs(eh->h_proto);		\
1706 	bool is_mc_bc;						\
1707 								\
1708 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1709 		   is_multicast_ether_addr((uint8_t *)eh);	\
1710 								\
1711 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1712 		hw_classify = 1;				\
1713 		pkt_subtype = 0x01 <<				\
1714 			HTT_TX_CLASSIFY_BIT_S;			\
1715 	}							\
1716 								\
1717 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1718 		pkt_type = htt_pkt_type_ethernet;		\
1719 								\
1720 } while (0)
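
/*
 * Illustrative sketch (names are assumptions): classifying a unicast,
 * non-VLAN frame before download. For such a frame the macro sets
 * hw_classify to 1 and the tx_classify bit in pkt_subtype; pkt_type
 * starts at the assumed default htt_pkt_type_eth2 from enum
 * htt_pkt_type.
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */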
1721 
/*
 * nbuf private buffer routines
 */
1725 
1726 /**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the network buffer length
1731  *
1732  * Return: none
1733  */
1734 static inline void
1735 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1736 {
1737 	*addr = skb->data;
1738 	*len = skb->len;
1739 }
1740 
1741 /**
 * typedef __qdf_nbuf_queue_t - network buffer queue
1743  * @head: Head pointer
1744  * @tail: Tail pointer
1745  * @qlen: Queue length
1746  */
1747 typedef struct __qdf_nbuf_qhead {
1748 	struct sk_buff *head;
1749 	struct sk_buff *tail;
1750 	unsigned int qlen;
1751 } __qdf_nbuf_queue_t;
1752 
/****************** Functions ******************/
1754 
1755 /**
 * __qdf_nbuf_queue_init() - initialize the queue head
1757  * @qhead: Queue head
1758  *
1759  * Return: QDF status
1760  */
1761 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1762 {
1763 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1764 	return QDF_STATUS_SUCCESS;
1765 }
1766 
1767 /**
1768  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1769  * @qhead: Queue head
1770  * @skb: Pointer to network buffer
1771  *
1772  * This is a lockless version, driver must acquire locks if it
1773  * needs to synchronize
1774  *
1775  * Return: none
1776  */
1777 static inline void
1778 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1779 {
	skb->next = NULL;       /* Nullify the next ptr */
1781 
1782 	if (!qhead->head)
1783 		qhead->head = skb;
1784 	else
1785 		qhead->tail->next = skb;
1786 
1787 	qhead->tail = skb;
1788 	qhead->qlen++;
1789 }
1790 
1791 /**
1792  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1793  * @dest: target netbuf queue
1794  * @src:  source netbuf queue
1795  *
1796  * Return: target netbuf queue
1797  */
1798 static inline __qdf_nbuf_queue_t *
1799 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1800 {
1801 	if (!dest)
1802 		return NULL;
1803 	else if (!src || !(src->head))
1804 		return dest;
1805 
1806 	if (!(dest->head))
1807 		dest->head = src->head;
1808 	else
1809 		dest->tail->next = src->head;
1810 
1811 	dest->tail = src->tail;
1812 	dest->qlen += src->qlen;
1813 	return dest;
1814 }
1815 
1816 /**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1818  * @qhead: Queue head
1819  * @skb: Pointer to network buffer
1820  *
1821  * This is a lockless version, driver must acquire locks if it needs to
1822  * synchronize
1823  *
1824  * Return: none
1825  */
1826 static inline void
1827 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1828 {
1829 	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
1831 		qhead->tail = skb;
1832 	}
1833 	skb->next = qhead->head;
1834 	qhead->head = skb;
1835 	qhead->qlen++;
1836 }
1837 
1838 /**
 * __qdf_nbuf_queue_remove_last() - remove an skb from the tail of the queue
1840  * @qhead: Queue head
1841  *
1842  * This is a lockless version. Driver should take care of the locks
1843  *
1844  * Return: skb or NULL
1845  */
static inline struct sk_buff *
__qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp_tail, node = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp_tail = qhead->tail;
		node = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
			return node;
		} else {
			while (tmp_tail != node->next)
				node = node->next;
			qhead->tail = node;
			/* detach the removed skb from the new tail */
			node->next = NULL;
			return tmp_tail;
		}
	}
	return node;
}
1868 
1869 /**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1871  * @qhead: Queue head
1872  *
1873  * This is a lockless version. Driver should take care of the locks
1874  *
1875  * Return: skb or NULL
1876  */
1877 static inline
1878 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1879 {
1880 	__qdf_nbuf_t tmp = NULL;
1881 
1882 	if (qhead->head) {
1883 		qhead->qlen--;
1884 		tmp = qhead->head;
1885 		if (qhead->head == qhead->tail) {
1886 			qhead->head = NULL;
1887 			qhead->tail = NULL;
1888 		} else {
1889 			qhead->head = tmp->next;
1890 		}
1891 		tmp->next = NULL;
1892 	}
1893 	return tmp;
1894 }
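
/*
 * Illustrative sketch (caller-provided names): a driver-local staging
 * queue. These queue ops are lockless, so the caller serializes access.
 *
 *	__qdf_nbuf_queue_t txq;
 *
 *	__qdf_nbuf_queue_init(&txq);
 *	__qdf_nbuf_queue_add(&txq, skb_a);		// txq: skb_a
 *	__qdf_nbuf_queue_insert_head(&txq, skb_b);	// txq: skb_b, skb_a
 *	skb = __qdf_nbuf_queue_remove(&txq);		// returns skb_b
 */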
1895 
1896 /**
1897  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1898  * @qhead: head of queue
1899  *
1900  * Return: NULL if the queue is empty
1901  */
1902 static inline struct sk_buff *
1903 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1904 {
1905 	return qhead->head;
1906 }
1907 
1908 /**
1909  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1910  * @qhead: head of queue
1911  *
1912  * Return: NULL if the queue is empty
1913  */
1914 static inline struct sk_buff *
1915 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1916 {
1917 	return qhead->tail;
1918 }
1919 
1920 /**
1921  * __qdf_nbuf_queue_len() - return the queue length
1922  * @qhead: Queue head
1923  *
1924  * Return: Queue length
1925  */
1926 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1927 {
1928 	return qhead->qlen;
1929 }
1930 
1931 /**
1932  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1933  * @skb: Pointer to network buffer
1934  *
 * This API returns the next skb from the packet chain; note that the skb
 * is still in the queue.
1937  *
1938  * Return: NULL if no packets are there
1939  */
1940 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1941 {
1942 	return skb->next;
1943 }
1944 
1945 /**
1946  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1947  * @qhead: Queue head
1948  *
1949  * Return: true if length is 0 else false
1950  */
1951 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1952 {
1953 	return qhead->qlen == 0;
1954 }
1955 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */
1967 
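/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag (no-op on Linux)
 * @skb: Pointer to network buffer
 * @flag: Send-complete flag value
 *
 * Return: none
 */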
1968 static inline void
1969 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1970 {
1971 }
1972 
1973 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping
 *        the skb shell intact; in case of failure the skb is released.
1977  * @skb: sk buff
1978  * @headroom: size of headroom
1979  *
1980  * Return: skb or NULL
1981  */
1982 static inline struct sk_buff *
1983 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1984 {
1985 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1986 		dev_kfree_skb_any(skb);
1987 		skb = NULL;
1988 	}
1989 	return skb;
1990 }
1991 
1992 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping
 *        the skb shell intact; in case of failure it releases the skb.
1996  * @skb: sk buff
1997  * @tailroom: size of tailroom
1998  *
1999  * Return: skb or NULL
2000  */
2001 static inline struct sk_buff *
2002 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
2003 {
2004 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
2005 		return skb;
	/* unlikely path */
2009 	dev_kfree_skb_any(skb);
2010 	return NULL;
2011 }
2012 
2013 /**
2014  * __qdf_nbuf_linearize() - skb linearize
2015  * @skb: sk buff
2016  *
 * If the nbuf is non-linear, this function linearizes it so that the
 * payload can be accessed as a single contiguous buffer.
 *
 * Return: 0 on success, -ENOMEM on failure.
2024  */
2025 static inline int
2026 __qdf_nbuf_linearize(struct sk_buff *skb)
2027 {
2028 	return skb_linearize(skb);
2029 }
2030 
2031 /**
2032  * __qdf_nbuf_unshare() - skb unshare
2033  * @skb: sk buff
2034  *
2035  * create a version of the specified nbuf whose contents
2036  * can be safely modified without affecting other
 * users. If the nbuf is a clone then this function
2038  * creates a new copy of the data. If the buffer is not
2039  * a clone the original buffer is returned.
2040  *
2041  * Return: skb or NULL
2042  */
2043 static inline struct sk_buff *
2044 __qdf_nbuf_unshare(struct sk_buff *skb)
2045 {
2046 	struct sk_buff *skb_new;
2047 
2048 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
2049 
2050 	skb_new = skb_unshare(skb, GFP_ATOMIC);
2051 	if (skb_new)
2052 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
2053 
2054 	return skb_new;
2055 }
2056 
2057 /**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
2060  *
2061  * Return: true/false
2062  */
2063 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
2064 {
2065 	return skb_cloned(skb);
2066 }
2067 
2068 /**
2069  * __qdf_nbuf_pool_init() - init pool
2070  * @net: net handle
2071  *
2072  * Return: QDF status
2073  */
2074 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
2075 {
2076 	return QDF_STATUS_SUCCESS;
2077 }
2078 
2079 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
2081  */
2082 #define __qdf_nbuf_pool_delete(osdev)
2083 
2084 /**
2085  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2086  *        release the skb.
2087  * @skb: sk buff
2088  * @headroom: size of headroom
2089  * @tailroom: size of tailroom
2090  *
2091  * Return: skb or NULL
2092  */
2093 static inline struct sk_buff *
2094 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2095 {
2096 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2097 		return skb;
2098 
2099 	dev_kfree_skb_any(skb);
2100 	return NULL;
2101 }
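
/*
 * Illustrative sketch (sizes are assumptions): making room in place for
 * an encap header and a trailer. On failure the skb has already been
 * freed, so only the returned pointer may be used afterwards.
 *
 *	skb = __qdf_nbuf_expand(skb, 32, 8);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */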
2102 
2103 /**
2104  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2105  * @buf: Network buf instance
2106  * @headroom: Additional headroom to be added
2107  * @tailroom: Additional tailroom to be added
2108  *
2109  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2110  *	or NULL if there is no memory
2111  */
2112 static inline struct sk_buff *
2113 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2114 {
2115 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2116 }
2117 
2118 /**
2119  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2120  * @buf: Network buf instance
2121  *
2122  * Return: True, if buf has frag_list else return False
2123  */
2124 static inline bool
2125 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2126 {
2127 	return skb_has_frag_list(buf);
2128 }
2129 
2130 /**
2131  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2132  * @buf: Network buf instance
2133  *
2134  * Return: Network buf instance
2135  */
2136 static inline struct sk_buff *
2137 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2138 {
2139 	struct sk_buff *list;
2140 
2141 	if (!__qdf_nbuf_has_fraglist(buf))
2142 		return NULL;
2143 
2144 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2145 		;
2146 
2147 	return list;
2148 }
2149 
2150 /**
2151  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2152  * @buf: Network buf instance
2153  *
2154  * Return: void
2155  */
2156 static inline void
2157 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2158 {
2159 	struct sk_buff *list;
2160 
2161 	skb_walk_frags(buf, list)
2162 		skb_get(list);
2163 }
2164 
2165 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info (not supported; asserts)
 * @skb: sk buff
 * @hdr_off: checksum header offset (unused)
 * @where: checksum location (unused)
 *
2168  * Return: true/false
2169  */
2170 static inline bool
2171 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2172 			 uint8_t **where)
2173 {
2174 	qdf_assert(0);
2175 	return false;
2176 }
2177 
2178 /**
2179  * __qdf_nbuf_reset_ctxt() - mem zero control block
2180  * @nbuf: buffer
2181  *
2182  * Return: none
2183  */
2184 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2185 {
2186 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2187 }
2188 
2189 /**
2190  * __qdf_nbuf_network_header() - get network header
2191  * @buf: buffer
2192  *
2193  * Return: network header pointer
2194  */
2195 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2196 {
2197 	return skb_network_header(buf);
2198 }
2199 
2200 /**
2201  * __qdf_nbuf_transport_header() - get transport header
2202  * @buf: buffer
2203  *
2204  * Return: transport header pointer
2205  */
2206 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2207 {
2208 	return skb_transport_header(buf);
2209 }
2210 
2211 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 *  passed as part of network buffer by network stack
2214  * @skb: sk buff
2215  *
2216  * Return: TCP MSS size
 */
2219 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2220 {
2221 	return skb_shinfo(skb)->gso_size;
2222 }
2223 
2224 /**
2225  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2226  * @nbuf: sk buff
2227  *
2228  * Return: none
2229  */
2230 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2231 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2234  * @nbuf: sk buff
2235  *
2236  * Return: void ptr
2237  */
2238 static inline void *
2239 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2240 {
2241 	return (void *)nbuf->cb;
2242 }
2243 
2244 /**
2245  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2246  * @skb: sk buff
2247  *
2248  * Return: head size
2249  */
2250 static inline size_t
2251 __qdf_nbuf_headlen(struct sk_buff *skb)
2252 {
2253 	return skb_headlen(skb);
2254 }
2255 
2256 /**
 * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is an IPv4 one or not.
 * @skb: sk buff
2259  *
2260  * Return: true/false
2261  */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
}
2266 
2267 /**
 * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is an IPv6 one or not.
 * @skb: sk buff
2270  *
2271  * Return: true/false
2272  */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
}
2277 
2278 /**
2279  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2280  * @skb: sk buff
2281  *
2282  * Return: size of l2+l3+l4 header length
2283  */
2284 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2285 {
2286 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2287 }
2288 
2289 /**
2290  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2291  * @skb: sk buff
2292  *
2293  * Return: size of TCP header length
2294  */
2295 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2296 {
2297 	return tcp_hdrlen(skb);
2298 }
2299 
2300 /**
2301  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
2305  */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb);
}
2313 
2314 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2317  *
2318  * Return: TCP sequence number
2319  */
2320 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2321 {
2322 	return ntohl(tcp_hdr(skb)->seq);
2323 }
2324 
2325 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2328  *
2329  * Return: data pointer to typecast into your priv structure
2330  */
2331 static inline char *
2332 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2333 {
2334 	return &skb->cb[8];
2335 }
2336 
2337 /**
2338  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2339  * @buf: Pointer to nbuf
2340  *
2341  * Return: None
2342  */
2343 static inline void
2344 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2345 {
2346 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2347 }
2348 
2349 /**
2350  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2351  *
 * @skb: sk buff
2353  * @queue_id: Queue id
2354  *
2355  * Return: void
2356  */
2357 static inline void
2358 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2359 {
2360 	skb_record_rx_queue(skb, queue_id);
2361 }
2362 
2363 /**
2364  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2365  *
 * @skb: sk buff
2367  *
2368  * Return: Queue mapping
2369  */
2370 static inline uint16_t
2371 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2372 {
2373 	return skb->queue_mapping;
2374 }
2375 
2376 /**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 *
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
 */
2383 static inline void
2384 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2385 {
2386 	skb_set_queue_mapping(skb, val);
2387 }
2388 
2389 /**
2390  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2391  *
 * @skb: sk buff
2393  *
2394  * Return: void
2395  */
2396 static inline void
2397 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2398 {
2399 	__net_timestamp(skb);
2400 }
2401 
2402 /**
2403  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2404  *
 * @skb: sk buff
2406  *
2407  * Return: timestamp stored in skb in ms
2408  */
2409 static inline uint64_t
2410 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2411 {
2412 	return ktime_to_ms(skb_get_ktime(skb));
2413 }
2414 
2415 /**
2416  * __qdf_nbuf_get_timestamp_us() - get the timestamp for frame
2417  *
 * @skb: sk buff
2419  *
2420  * Return: timestamp stored in skb in us
2421  */
2422 static inline uint64_t
2423 __qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
2424 {
2425 	return ktime_to_us(skb_get_ktime(skb));
2426 }
2427 
2428 /**
2429  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2430  *
 * @skb: sk buff
2432  *
2433  * Return: time difference in ms
2434  */
2435 static inline uint64_t
2436 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2437 {
2438 	return ktime_to_ms(net_timedelta(skb->tstamp));
2439 }
2440 
2441 /**
2442  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2443  *
 * @skb: sk buff
2445  *
2446  * Return: time difference in micro seconds
2447  */
2448 static inline uint64_t
2449 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2450 {
2451 	return ktime_to_us(net_timedelta(skb->tstamp));
2452 }
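/*
 * Illustrative sketch: stamping an skb on ingress and later measuring
 * how long it spent in the driver ("latency_us" is an assumed local).
 *
 *	uint64_t latency_us;
 *
 *	__qdf_nbuf_set_timestamp(skb);
 *	...
 *	latency_us = __qdf_nbuf_get_timedelta_us(skb);
 */
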
2453 
2454 /**
2455  * __qdf_nbuf_orphan() - orphan a nbuf
2456  * @skb: sk buff
2457  *
2458  * If a buffer currently has an owner then we call the
2459  * owner's destructor function
2460  *
2461  * Return: void
2462  */
2463 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2464 {
	skb_orphan(skb);
2466 }
2467 
2468 /**
2469  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2470  * head pointer to end pointer
2471  * @nbuf: qdf_nbuf_t
2472  *
2473  * Return: size of network buffer from head pointer to end
2474  * pointer
2475  */
2476 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2477 {
2478 	return skb_end_offset(nbuf);
2479 }
2480 
2481 /**
2482  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2483  * including the header and variable data area
2484  * @skb: sk buff
2485  *
2486  * Return: size of network buffer
2487  */
2488 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2489 {
2490 	return skb->truesize;
2491 }
2492 
2493 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2494 /**
2495  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2496  * from the total skb mem and DP tx/rx skb mem
2497  * @nbytes: number of bytes
2498  * @dir: direction
2499  * @is_mapped: is mapped or unmapped memory
2500  *
2501  * Return: none
2502  */
2503 static inline void __qdf_record_nbuf_nbytes(
2504 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2505 {
2506 	if (is_mapped) {
2507 		if (dir == QDF_DMA_TO_DEVICE) {
2508 			qdf_mem_dp_tx_skb_cnt_inc();
2509 			qdf_mem_dp_tx_skb_inc(nbytes);
2510 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2511 			qdf_mem_dp_rx_skb_cnt_inc();
2512 			qdf_mem_dp_rx_skb_inc(nbytes);
2513 		}
2514 		qdf_mem_skb_total_inc(nbytes);
2515 	} else {
2516 		if (dir == QDF_DMA_TO_DEVICE) {
2517 			qdf_mem_dp_tx_skb_cnt_dec();
2518 			qdf_mem_dp_tx_skb_dec(nbytes);
2519 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2520 			qdf_mem_dp_rx_skb_cnt_dec();
2521 			qdf_mem_dp_rx_skb_dec(nbytes);
2522 		}
2523 		qdf_mem_skb_total_dec(nbytes);
2524 	}
2525 }
2526 
2527 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2528 static inline void __qdf_record_nbuf_nbytes(
2529 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2530 {
2531 }
2532 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2533 
2534 /**
2535  * __qdf_nbuf_map_nbytes_single() - map nbytes
2536  * @osdev: os device
2537  * @buf: buffer
2538  * @dir: direction
2539  * @nbytes: number of bytes
2540  *
2541  * Return: QDF_STATUS
2542  */
2543 #ifdef A_SIMOS_DEVHOST
2544 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2545 		qdf_device_t osdev, struct sk_buff *buf,
2546 		qdf_dma_dir_t dir, int nbytes)
2547 {
	/* no real DMA mapping on the simulation host; store the kernel VA */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
2551 	return QDF_STATUS_SUCCESS;
2552 }
2553 #else
2554 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2555 		qdf_device_t osdev, struct sk_buff *buf,
2556 		qdf_dma_dir_t dir, int nbytes)
2557 {
2558 	qdf_dma_addr_t paddr;
2559 	QDF_STATUS ret;
2560 
2561 	/* assume that the OS only provides a single fragment */
2562 	QDF_NBUF_CB_PADDR(buf) = paddr =
2563 		dma_map_single(osdev->dev, buf->data,
2564 			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
2566 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2567 	if (QDF_IS_STATUS_SUCCESS(ret))
2568 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2569 					 dir, true);
2570 	return ret;
2571 }
#endif

2573 /**
2574  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2575  * @osdev: os device
2576  * @buf: buffer
2577  * @dir: direction
2578  * @nbytes: number of bytes
2579  *
2580  * Return: none
2581  */
2582 #if defined(A_SIMOS_DEVHOST)
2583 static inline void
2584 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2585 			       qdf_dma_dir_t dir, int nbytes)
2586 {
2587 }
2588 
2589 #else
2590 static inline void
2591 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2592 			       qdf_dma_dir_t dir, int nbytes)
2593 {
2594 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2595 
2596 	if (qdf_likely(paddr)) {
2597 		__qdf_record_nbuf_nbytes(
2598 			__qdf_nbuf_get_end_offset(buf), dir, false);
2599 		dma_unmap_single(osdev->dev, paddr, nbytes,
2600 				 __qdf_dma_dir_to_os(dir));
2601 		return;
2602 	}
2603 }
2604 #endif
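
/*
 * Illustrative sketch (assumed flow): a tx path maps the linear data
 * before DMA and unmaps it from the completion handler with the same
 * direction and length.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */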
2605 
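/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the list head
 * @skb_queue_head: skb list from which the skb is dequeued
 *
 * Return: skb, or NULL if the list is empty
 */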
2606 static inline struct sk_buff *
2607 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2608 {
2609 	return skb_dequeue(skb_queue_head);
2610 }
2611 
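/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of skbs in the list
 */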
2612 static inline
2613 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2614 {
2615 	return skb_queue_head->qlen;
2616 }
2617 
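/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the list tail
 * @skb_queue_head: skb list
 * @skb: skb to be enqueued
 *
 * Return: none
 */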
2618 static inline
2619 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2620 					struct sk_buff *skb)
2621 {
	skb_queue_tail(skb_queue_head, skb);
2623 }
2624 
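/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list head
 * @skb_queue_head: skb list to be initialized
 *
 * Return: none
 */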
2625 static inline
2626 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2627 {
	skb_queue_head_init(skb_queue_head);
2629 }
2630 
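/**
 * __qdf_nbuf_queue_head_purge() - free all skbs in the list
 * @skb_queue_head: skb list to be purged
 *
 * Return: none
 */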
2631 static inline
2632 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2633 {
	skb_queue_purge(skb_queue_head);
2635 }
2636 
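/**
 * __qdf_nbuf_queue_empty() - check whether the skb list is empty
 * @nbuf_queue_head: skb list
 *
 * Return: non-zero if the list is empty, 0 otherwise
 */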
2637 static inline
2638 int __qdf_nbuf_queue_empty(__qdf_nbuf_queue_head_t *nbuf_queue_head)
2639 {
2640 	return skb_queue_empty(nbuf_queue_head);
2641 }
2642 
2643 /**
2644  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2646  *
2647  * Return: void
2648  */
2649 static inline
2650 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2651 {
2652 	spin_lock_bh(&skb_queue_head->lock);
2653 }
2654 
2655 /**
2656  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2658  *
2659  * Return: void
2660  */
2661 static inline
2662 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2663 {
2664 	spin_unlock_bh(&skb_queue_head->lock);
2665 }
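/*
 * Illustrative sketch: these lock helpers pair with the unlocked
 * __skb_* style operations such as __qdf_nbuf_dev_queue_head() below;
 * wrappers like __qdf_nbuf_queue_head_dequeue() take the list lock
 * internally and must not be called with it held.
 *
 *	__qdf_nbuf_queue_head_lock(&q);
 *	__qdf_nbuf_dev_queue_head(&q, skb);
 *	__qdf_nbuf_queue_head_unlock(&q);
 */
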
2666 
2667 /**
2668  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2669  * @nbuf: qdf_nbuf_t
2670  * @idx: Index for which frag size is requested
2671  *
2672  * Return: Frag size
2673  */
2674 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2675 							   uint8_t idx)
2676 {
2677 	unsigned int size = 0;
2678 
2679 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2680 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2681 	return size;
2682 }
2683 
2684 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
2690  */
2691 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2692 						    uint8_t idx)
2693 {
2694 	__qdf_frag_t frag_addr = NULL;
2695 
2696 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2697 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2698 	return frag_addr;
2699 }
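
/*
 * Illustrative sketch: walking the frags of a non-linear nbuf (the
 * printout is an assumed debug aid).
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < __qdf_nbuf_get_nr_frags(skb); i++)
 *		pr_debug("frag %u: %u bytes at %pK\n", i,
 *			 __qdf_nbuf_get_frag_size_by_idx(skb, i),
 *			 __qdf_nbuf_get_frag_addr(skb, i));
 */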
2700 
2701 /**
2702  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2703  * @nbuf: qdf_nbuf_t
2704  * @idx: Frag index
2705  * @size: Size by which frag_size needs to be increased/decreased
2706  *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2709 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2710 						 int size,
2711 						 unsigned int truesize)
2712 {
2713 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2714 }
2715 
2716 /**
2717  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2718  *          and adjust length by size.
2719  * @nbuf: qdf_nbuf_t
2720  * @idx: Frag index
2721  * @offset: Frag page offset should be moved by offset.
2722  *      +Ve - Move offset forward.
2723  *      -Ve - Move offset backward.
2724  *
2725  * Return: QDF_STATUS
2726  */
2727 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2728 					    int offset);
2729 
2730 /**
2731  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
2732  * @nbuf: nbuf pointer
2733  * @idx: frag idx need to be removed
2734  * @truesize: truesize of frag
2735  *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

2739 /**
2740  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
2741  * @buf: Frag pointer needs to be added in nbuf frag
2742  * @nbuf: qdf_nbuf_t where frag will be added
2743  * @offset: Offset in frag to be added to nbuf_frags
2744  * @frag_len: Frag length
2745  * @truesize: truesize
2746  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per below condition:
2748  *      1. False: If this frag is being added in any nbuf
2749  *              for the first time after allocation.
2750  *      2. True: If frag is already attached part of any
2751  *              nbuf.
2752  *
2753  * It takes ref_count based on boolean flag take_frag_ref
2754  */
2755 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2756 			    int offset, int frag_len,
2757 			    unsigned int truesize, bool take_frag_ref);
2758 
2759 /**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: frag to take a reference on
 *
2762  * Return: void
2763  */
2764 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2765 
2766 /**
2767  * __qdf_nbuf_set_mark() - Set nbuf mark
2768  * @buf: Pointer to nbuf
2769  * @mark: Value to set mark
2770  *
2771  * Return: None
2772  */
2773 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2774 {
2775 	buf->mark = mark;
2776 }
2777 
2778 /**
2779  * __qdf_nbuf_get_mark() - Get nbuf mark
2780  * @buf: Pointer to nbuf
2781  *
2782  * Return: Value of mark
2783  */
2784 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2785 {
2786 	return buf->mark;
2787 }
2788 
2789 /**
2790  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2791  * the data pointer to the end pointer
2792  * @nbuf: qdf_nbuf_t
2793  *
2794  * Return: size of skb from data pointer to end pointer
2795  */
2796 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2797 {
2798 	return (skb_end_pointer(nbuf) - nbuf->data);
2799 }
2800 
2801 /**
 * __qdf_nbuf_set_data_len() - set the data_len of the nbuf
 * @nbuf: qdf_nbuf_t
 * @len: data_len to be set
2804  *
2805  * Return: value of data_len
2806  */
2807 static inline
2808 qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
2809 {
2810 	return nbuf->data_len = len;
2811 }
2812 
2813 /**
2814  * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
2815  * @nbuf: qdf_nbuf_t
2816  *
2817  * Return: value of data_len
2818  */
2819 static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
2820 {
2821 	return nbuf->data_len;
2822 }
2823 
2824 /**
2825  * __qdf_nbuf_set_hash() - set the hash of the buf
2826  * @buf: Network buf instance
 * @len: hash value to be set
2828  *
2829  * Return: None
2830  */
2831 static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
2832 {
2833 	buf->hash = len;
2834 }
2835 
2836 /**
2837  * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
2838  * @buf: Network buf instance
 * @len: sw hash value to be set
2840  *
2841  * Return: None
2842  */
2843 static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
2844 {
2845 	buf->sw_hash = len;
2846 }
2847 
2848 /**
2849  * __qdf_nbuf_set_csum_start() - set the csum start of the buf
2850  * @buf: Network buf instance
 * @len: checksum start offset to be set
2852  *
2853  * Return: None
2854  */
2855 static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
2856 {
2857 	buf->csum_start = len;
2858 }
2859 
2860 /**
2861  * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
2862  * @buf: Network buf instance
 * @len: checksum offset to be set
2864  *
2865  * Return: None
2866  */
2867 static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
2868 {
2869 	buf->csum_offset = len;
2870 }
2871 
2872 /**
2873  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2874  * @skb: Pointer to network buffer
2875  *
2876  * Return: Return the number of gso segments
2877  */
2878 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2879 {
2880 	return skb_shinfo(skb)->gso_segs;
2881 }
2882 
2883 /**
2884  * __qdf_nbuf_set_gso_segs() - set the number of gso segments
2885  * @skb: Pointer to network buffer
2886  * @val: val to be set
2887  *
2888  * Return: None
2889  */
2890 static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
2891 {
2892 	skb_shinfo(skb)->gso_segs = val;
2893 }
2894 
2895 /**
2896  * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
2897  * @skb: Pointer to network buffer
2898  *
2899  * Return: None
2900  */
2901 static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
2902 {
2903 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
2904 }
2905 
2906 /**
2907  * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
2908  * @skb: Pointer to network buffer
2909  *
2910  * Return: None
2911  */
2912 static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
2913 {
2914 	skb->ip_summed = CHECKSUM_PARTIAL;
2915 }
2916 
2917 /**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
2922  */
2923 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2924 {
2925 	return skb_shinfo(skb)->gso_size;
2926 }
2927 
2928 /**
2929  * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso size to be set
 *
 * Return: None
2933  */
2934 static inline void
2935 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
2936 {
2937 	skb_shinfo(skb)->gso_size = val;
2938 }
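
/*
 * Illustrative sketch (values are assumptions): preparing an skb for
 * UDP segmentation offload before handing it to the stack.
 *
 *	__qdf_nbuf_set_gso_type_udp_l4(skb);
 *	__qdf_nbuf_set_gso_size(skb, 1472);
 *	__qdf_nbuf_set_ip_summed_partial(skb);
 */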
2939 
2940 /**
2941  * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
2943  *
2944  * This function is called to free the skb on failure cases
2945  *
2946  * Return: None
2947  */
2948 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
2949 {
2950 	kfree_skb(skb);
2951 }
2952 
2953 /**
2954  * __qdf_nbuf_dev_kfree_list() - Free nbuf list using dev based os call
 * @nbuf_queue_head: Pointer to nbuf queue head
2956  *
2957  * This function is called to free the nbuf list on failure cases
2958  *
2959  * Return: None
2960  */
2961 void
2962 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head);
2963 
2964 /**
2965  * __qdf_nbuf_dev_queue_head() - queue a buffer using dev at the list head
 * @nbuf_queue_head: Pointer to skb list head
2967  * @buff: Pointer to nbuf
2968  *
2969  * This function is called to queue buffer at the skb list head
2970  *
2971  * Return: None
2972  */
2973 static inline void
2974 __qdf_nbuf_dev_queue_head(__qdf_nbuf_queue_head_t *nbuf_queue_head,
2975 			  __qdf_nbuf_t buff)
2976 {
	__skb_queue_head(nbuf_queue_head, buff);
2978 }
2979 
2980 /**
2981  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
2983  *
2984  * This function is called to free the skb on failure cases
2985  *
2986  * Return: None
2987  */
2988 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
2989 {
2990 	dev_kfree_skb(skb);
2991 }
2992 
2993 /**
2994  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
2996  *
2997  * Return: TRUE if skb pkt type is mcast
2998  *         FALSE if not
2999  */
3000 static inline
3001 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
3002 {
3003 	return skb->pkt_type == PACKET_MULTICAST;
3004 }
3005 
3006 /**
3007  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is bcast
3011  *         FALSE if not
3012  */
3013 static inline
3014 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
3015 {
3016 	return skb->pkt_type == PACKET_BROADCAST;
3017 }
3018 
3019 /**
 * __qdf_nbuf_set_dev() - set dev of network buffer
 * @skb: Pointer to network buffer
 * @dev: net device to be set in the network buffer
3023  *
3024  * Return: void
3025  */
3026 static inline
3027 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
3028 {
3029 	skb->dev = dev;
3030 }
3031 
3032 /**
3033  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
 * @skb: Pointer to network buffer
3035  *
3036  * Return: dev mtu value in nbuf
3037  */
3038 static inline
3039 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
3040 {
3041 	return skb->dev->mtu;
3042 }
3043 
3044 /**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using eth trans os API
 * @skb: Pointer to network buffer
3047  *
3048  * Return: None
3049  */
3050 static inline
3051 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
3052 {
3053 	skb->protocol = eth_type_trans(skb, skb->dev);
3054 }
3055 
/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as qdf_ktime_t object
 *
 * Return: time delta as qdf_ktime_t object
3061  */
3062 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
3063 {
3064 	return net_timedelta(t);
3065 }
3066 
3067 #ifdef CONFIG_NBUF_AP_PLATFORM
3068 #include <i_qdf_nbuf_w.h>
3069 #else
3070 #include <i_qdf_nbuf_m.h>
3071 #endif
#endif /* _I_QDF_NBUF_H */
3073