/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* Since commit
 *  baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
 *
 * the function netif_rx() can be used in preemptible/thread context as
 * well as in interrupt context.
 *
 * Use netif_rx().
 */
#define netif_rx_ni(skb) netif_rx(skb)
#endif
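
/*
 * Example (illustrative sketch only; my_rx_handler and its skb are
 * hypothetical, not part of this header). With the mapping above, a
 * single call site compiles on kernels both before and after 5.18,
 * where netif_rx_ni() expands to plain netif_rx():
 *
 *	static void my_rx_handler(struct sk_buff *skb)
 *	{
 *		skb->protocol = eth_type_trans(skb, skb->dev);
 *		netif_rx_ni(skb);
 *	}
 */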

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of the nbuf. Linux uses sk_buff to represent both the packet and its
 * data, so a single sk_buff pointer serves as the nbuf handle.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

/**
 * typedef __qdf_nbuf_shared_info_t - abstraction for skb_shared_info
 *
 * This is used for skb shared info via linux skb shinfo APIs
 */
typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
#define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
#define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8
#define QDF_NBUF_CB_PACKET_TYPE_TCP_ACK 9

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

#define IEEE80211_RADIOTAP_EXT1_USIG	1
#define IEEE80211_RADIOTAP_EXT1_EHT	2

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/* TCP Related MASK */
#define QDF_NBUF_PKT_TCPOP_FIN			0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
#define QDF_NBUF_PKT_TCPOP_RST			0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
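
/*
 * Example (illustrative sketch only; dma_handle is a hypothetical
 * dma_addr_t value). Because the union overlays the address with a
 * uint64_t, the cb field stays 64 bit aligned even on targets where
 * qdf_dma_addr_t is 32 bits, and the value can be split into words:
 *
 *	qdf_paddr_t pa = { 0 };
 *	uint32_t lo, hi;
 *
 *	pa.dma_addr = dma_handle;
 *	lo = (uint32_t)(pa.u64 & 0xffffffff);
 *	hi = (uint32_t)(pa.u64 >> 32);
 */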

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr: physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_idx_valid: flow entry is found
 * @rx.dev.priv_cb_w.flow_idx_timeout: flow entry search timed out
 * @rx.dev.priv_cb_w.rsvd: reserved bits
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for the rcvd 5-tuple
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *					       sw exception bit from ring desc
 * @rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			   (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.is_critical: flag indicating a critical frame
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			   (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv:
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: Flag to indicate send this packet to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                       (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint8_t protocol_tag;
					uint8_t flow_idx_valid: 1,
						flow_idx_timeout: 1,
						rsvd:6;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 lmac_id:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					/* used only for hl */
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
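
/*
 * Example (illustrative sketch only; the skb and the value 3 are
 * hypothetical). The same macro serves as both accessor and mutator:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 3;			set (L-value)
 *	ctx_id = QDF_NBUF_CB_RX_CTX_ID(skb);		get (R-value)
 */
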
#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)

#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {								\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
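
/*
 * Example (illustrative sketch only; the descriptor variables are
 * hypothetical). A driver typically pushes its tx descriptor as the
 * single extra fragment and then queries it back as fragment 0:
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	nfrags = __qdf_nbuf_get_num_frags(skb);		now 2: efrag + skb
 *	vaddr  = __qdf_nbuf_get_frag_vaddr(skb, 0);	the extra fragment
 */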

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?	\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?	\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else							\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);

__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);
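
/*
 * Example (illustrative sketch only; the sizes are hypothetical):
 * allocate a 2048-byte nbuf, 4-byte aligned, with 64 bytes of headroom.
 * Because the reserve is applied after alignment, keep the reserve a
 * multiple of the alignment if skb->data must stay aligned:
 *
 *	nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0, __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */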

#if defined(QCA_DP_NBUF_FAST_PPEDS)
/**
 * __qdf_nbuf_alloc_ppe_ds() - Allocates nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf for the wifi module in DS mode, using the
 * __netdev_alloc_skb_no_skb_reset API. That netdev API invokes
 * skb_recycler_alloc with reset_skb set to false, so the recycler pool
 * does not perform reset_struct when it hands a DS buffer back to the
 * DS module, which helps improve performance.
 *
 * Return: nbuf or %NULL if no memory
 */

__qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);
#endif /* QCA_DP_NBUF_FAST_PPEDS */

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the callsite
 *
 * This API allocates an nbuf, aligns it if needed, and reserves headroom
 * after the alignment. The nbuf is not allocated from the skb recycler pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, the implementation could check whether it
 * is called from interrupt context and use GFP_ATOMIC only there, using
 * GFP_KERNEL in the normal case, e.g. via "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
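
/*
 * A minimal sketch of the context check suggested above (illustrative
 * only; this is not the actual implementation in qdf_nbuf.c):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ?
 *			GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(nbuf, flags);
 */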

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf);
bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
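
/*
 * Example (illustrative sketch only): map a kernel error code onto a
 * QDF status, as the buffer helpers below do; -ENOMEM becomes
 * QDF_STATUS_E_NOMEM:
 *
 *	int err = pskb_expand_head(skb, 0, 128, GFP_ATOMIC);
 *
 *	if (err)
 *		return __qdf_to_status(err);
 */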

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are
 * extra fragments, their length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}
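
/*
 * Example (illustrative sketch only; lengths are hypothetical): after
 * pushing a 16-byte extra fragment onto a 100-byte skb, the reported
 * nbuf length covers both, i.e. __qdf_nbuf_len() returns 116:
 *
 *	__qdf_nbuf_frag_push_head(skb, 16, desc_vaddr, desc_paddr);
 *	len = __qdf_nbuf_len(skb);
 */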

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one
 * (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS of the call; the src skb is never freed by this
 *         function, including on failure
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
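
/*
 * Example (illustrative sketch only): append src to dst, then free src
 * per the caller-owns-src contract described above:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		dev_kfree_skb_any(src);
 */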

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
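
/*
 * Example (illustrative sketch only; the 8-byte trailer is
 * hypothetical). Note that on failure the skb has already been freed,
 * so the caller must not touch it again:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 8);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;
 *	memset(tail, 0, 8);
 */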

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
 * @skb: sk buff
 *
 * This API returns the total number of fragments in the fraglist.
 *
 * Return: total number of fragments
 */
static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
{
	uint32_t num_frag = 0;
	struct sk_buff *list = NULL;

	num_frag = skb_shinfo(skb)->nr_frags;
	skb_walk_frags(skb, list)
		num_frag += skb_shinfo(list)->nr_frags;

	return num_frag;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This function is used to link up extensions to the head skb. It does
 * not handle linking to the head.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0).
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
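
/*
 * Example (illustrative sketch only; seg0..seg2 are hypothetical
 * segments of one jumbo frame): chain the tail segments together with
 * __qdf_nbuf_set_next_ext() and hang them off the head segment:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 */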

/**
 * __qdf_nbuf_get_shinfo() - return the shared info of the skb
 * @head_buf: Pointer to network buffer
 *
 * Return: skb shared info from head buf
 */
static inline
struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
{
	return skb_shinfo(head_buf);
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor-mode A-MSDU reception, where segments are stitched
 * together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy data bits from the skb to a destination buffer
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len: Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				   "SKB tailroom is less than requested length."
				   " tail-room: %u, len: %u, skb->len: %u",
				   skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);
1677 
/**
 * __qdf_nbuf_tx_info_get() - modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into the packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Packet type (from enum htt_pkt_type)
 * @pkt_subtype:	Sub-type field of the HTT descriptor; bit 4
 *			(the tx_classify bit) is set by this macro
 *			when CE classification support applies
 * @hw_classify:	Flag set by this macro to indicate that CE
 *			classification is enabled; it is not set for
 *			VLAN packets or for mcast/bcast frames
 *
 * This macro parses the payload to figure out the relevant Tx meta-data,
 * e.g. whether to enable the tx_classify bit in the CE.
 *
 * It overrides pkt_type only when required for 802.3 frames (original
 * Ethernet): if the protocol field is less than ETH_P_802_3_MIN (0x600),
 * the field holds a length and the frame is 802.3; otherwise it is
 * Ethernet Type II (RFC 894).
 *
 * Return:	void
 */
1703 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1704 				pkt_subtype, hw_classify)	\
1705 do {								\
1706 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1707 	uint16_t ether_type = ntohs(eh->h_proto);		\
1708 	bool is_mc_bc;						\
1709 								\
1710 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1711 		   is_multicast_ether_addr((uint8_t *)eh);	\
1712 								\
1713 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1714 		hw_classify = 1;				\
1715 		pkt_subtype = 0x01 <<				\
1716 			HTT_TX_CLASSIFY_BIT_S;			\
1717 	}							\
1718 								\
1719 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1720 		pkt_type = htt_pkt_type_ethernet;		\
1721 								\
1722 } while (0)
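
/*
 * Illustrative usage sketch (hypothetical caller; assumes the HTT
 * definitions used by the macro are in scope): classify an egress
 * frame before building its HTT tx descriptor.
 *
 *	static void my_tx_classify(struct sk_buff *skb)
 *	{
 *		enum htt_pkt_type pkt_type = htt_pkt_type_ethernet;
 *		uint8_t pkt_subtype = 0;
 *		uint8_t hw_classify = 0;
 *
 *		__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype,
 *				       hw_classify);
 *		// hw_classify and pkt_subtype now indicate whether the
 *		// CE tx_classify path applies to this frame
 *	}
 */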
1723 
/*
 * nbuf private buffer routines
 */
1727 
1728 /**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store network buffer length
1733  *
1734  * Return: none
1735  */
1736 static inline void
1737 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1738 {
1739 	*addr = skb->data;
1740 	*len = skb->len;
1741 }
1742 
1743 /**
1744  * typedef struct __qdf_nbuf_queue_t -  network buffer queue
1745  * @head: Head pointer
1746  * @tail: Tail pointer
1747  * @qlen: Queue length
1748  */
1749 typedef struct __qdf_nbuf_qhead {
1750 	struct sk_buff *head;
1751 	struct sk_buff *tail;
1752 	unsigned int qlen;
1753 } __qdf_nbuf_queue_t;
1754 
1755 /******************Functions *************/
1756 
1757 /**
 * __qdf_nbuf_queue_init() - initialize the queue head
1759  * @qhead: Queue head
1760  *
1761  * Return: QDF status
1762  */
1763 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1764 {
1765 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1766 	return QDF_STATUS_SUCCESS;
1767 }
1768 
1769 /**
1770  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1771  * @qhead: Queue head
1772  * @skb: Pointer to network buffer
1773  *
1774  * This is a lockless version, driver must acquire locks if it
1775  * needs to synchronize
1776  *
1777  * Return: none
1778  */
1779 static inline void
1780 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1781 {
1782 	skb->next = NULL;       /*Nullify the next ptr */
1783 
1784 	if (!qhead->head)
1785 		qhead->head = skb;
1786 	else
1787 		qhead->tail->next = skb;
1788 
1789 	qhead->tail = skb;
1790 	qhead->qlen++;
1791 }
1792 
1793 /**
1794  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1795  * @dest: target netbuf queue
1796  * @src:  source netbuf queue
1797  *
1798  * Return: target netbuf queue
1799  */
1800 static inline __qdf_nbuf_queue_t *
1801 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1802 {
1803 	if (!dest)
1804 		return NULL;
1805 	else if (!src || !(src->head))
1806 		return dest;
1807 
1808 	if (!(dest->head))
1809 		dest->head = src->head;
1810 	else
1811 		dest->tail->next = src->head;
1812 
1813 	dest->tail = src->tail;
1814 	dest->qlen += src->qlen;
1815 	return dest;
1816 }
1817 
1818 /**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1820  * @qhead: Queue head
1821  * @skb: Pointer to network buffer
1822  *
1823  * This is a lockless version, driver must acquire locks if it needs to
1824  * synchronize
1825  *
1826  * Return: none
1827  */
1828 static inline void
1829 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1830 {
1831 	if (!qhead->head) {
1832 		/*Empty queue Tail pointer Must be updated */
1833 		qhead->tail = skb;
1834 	}
1835 	skb->next = qhead->head;
1836 	qhead->head = skb;
1837 	qhead->qlen++;
1838 }
1839 
1840 /**
1841  * __qdf_nbuf_queue_remove_last() - remove a skb from the tail of the queue
1842  * @qhead: Queue head
1843  *
1844  * This is a lockless version. Driver should take care of the locks
1845  *
1846  * Return: skb or NULL
1847  */
1848 static inline struct sk_buff *
1849 __qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
1850 {
1851 	__qdf_nbuf_t tmp_tail, node = NULL;
1852 
1853 	if (qhead->head) {
1854 		qhead->qlen--;
1855 		tmp_tail = qhead->tail;
1856 		node = qhead->head;
1857 		if (qhead->head == qhead->tail) {
1858 			qhead->head = NULL;
1859 			qhead->tail = NULL;
1860 			return node;
		} else {
			while (tmp_tail != node->next)
				node = node->next;
			qhead->tail = node;
			/* detach the removed tail before returning it */
			node->next = NULL;
			return tmp_tail;
		}
1867 	}
1868 	return node;
1869 }
1870 
1871 /**
1872  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1873  * @qhead: Queue head
1874  *
1875  * This is a lockless version. Driver should take care of the locks
1876  *
1877  * Return: skb or NULL
1878  */
1879 static inline
1880 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1881 {
1882 	__qdf_nbuf_t tmp = NULL;
1883 
1884 	if (qhead->head) {
1885 		qhead->qlen--;
1886 		tmp = qhead->head;
1887 		if (qhead->head == qhead->tail) {
1888 			qhead->head = NULL;
1889 			qhead->tail = NULL;
1890 		} else {
1891 			qhead->head = tmp->next;
1892 		}
1893 		tmp->next = NULL;
1894 	}
1895 	return tmp;
1896 }
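
/*
 * Illustrative usage sketch (hypothetical my_* names): the
 * __qdf_nbuf_queue_* helpers are lockless, so callers shared between
 * contexts must supply their own lock around them.
 *
 *	static __qdf_nbuf_queue_t my_q;		// __qdf_nbuf_queue_init()'d
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_produce(struct sk_buff *skb)
 *	{
 *		spin_lock_bh(&my_lock);
 *		__qdf_nbuf_queue_add(&my_q, skb);
 *		spin_unlock_bh(&my_lock);
 *	}
 *
 *	static struct sk_buff *my_consume(void)
 *	{
 *		struct sk_buff *skb;
 *
 *		spin_lock_bh(&my_lock);
 *		skb = __qdf_nbuf_queue_remove(&my_q);
 *		spin_unlock_bh(&my_lock);
 *		return skb;
 *	}
 */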
1897 
1898 /**
1899  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1900  * @qhead: head of queue
1901  *
1902  * Return: NULL if the queue is empty
1903  */
1904 static inline struct sk_buff *
1905 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1906 {
1907 	return qhead->head;
1908 }
1909 
1910 /**
1911  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1912  * @qhead: head of queue
1913  *
1914  * Return: NULL if the queue is empty
1915  */
1916 static inline struct sk_buff *
1917 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1918 {
1919 	return qhead->tail;
1920 }
1921 
1922 /**
1923  * __qdf_nbuf_queue_len() - return the queue length
1924  * @qhead: Queue head
1925  *
1926  * Return: Queue length
1927  */
1928 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1929 {
1930 	return qhead->qlen;
1931 }
1932 
1933 /**
1934  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1935  * @skb: Pointer to network buffer
1936  *
1937  * This API returns the next skb from packet chain, remember the skb is
1938  * still in the queue
1939  *
1940  * Return: NULL if no packets are there
1941  */
1942 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1943 {
1944 	return skb->next;
1945 }
1946 
1947 /**
1948  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1949  * @qhead: Queue head
1950  *
1951  * Return: true if length is 0 else false
1952  */
1953 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1954 {
1955 	return qhead->qlen == 0;
1956 }
1957 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type for the definition.
 */
1969 
/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag
 * @skb: Pointer to network buffer
 * @flag: flag value
 *
 * No-op on Linux; provided to keep the QDF nbuf API uniform.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1974 
1975 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping
 *        the skb shell intact. In case of failure the skb is released.
1979  * @skb: sk buff
1980  * @headroom: size of headroom
1981  *
1982  * Return: skb or NULL
1983  */
1984 static inline struct sk_buff *
1985 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1986 {
1987 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1988 		dev_kfree_skb_any(skb);
1989 		skb = NULL;
1990 	}
1991 	return skb;
1992 }
1993 
1994 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping
 *        the skb shell intact. In case of failure it releases the skb.
1998  * @skb: sk buff
1999  * @tailroom: size of tailroom
2000  *
2001  * Return: skb or NULL
2002  */
2003 static inline struct sk_buff *
2004 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
2005 {
2006 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
2007 		return skb;
	/* unlikely path */
2011 	dev_kfree_skb_any(skb);
2012 	return NULL;
2013 }
2014 
/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, pull all of its fragment data into the
 * linear data area so it can be accessed as one contiguous buffer.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
2027 static inline int
2028 __qdf_nbuf_linearize(struct sk_buff *skb)
2029 {
2030 	return skb_linearize(skb);
2031 }
2032 
2033 /**
2034  * __qdf_nbuf_unshare() - skb unshare
2035  * @skb: sk buff
2036  *
 * Create a version of the specified nbuf whose contents
 * can be safely modified without affecting other
 * users. If the nbuf is a clone, this function
 * creates a new copy of the data; if the buffer is not
 * a clone, the original buffer is returned.
2042  *
2043  * Return: skb or NULL
2044  */
2045 static inline struct sk_buff *
2046 __qdf_nbuf_unshare(struct sk_buff *skb)
2047 {
2048 	struct sk_buff *skb_new;
2049 
2050 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
2051 
2052 	skb_new = skb_unshare(skb, GFP_ATOMIC);
2053 	if (skb_new)
2054 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
2055 
2056 	return skb_new;
2057 }
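
/*
 * Illustrative usage sketch (hypothetical my_rewrite_header()): unshare
 * before writing so that a cloned buffer is not modified behind the
 * backs of other users. On failure the original skb has already been
 * freed by skb_unshare(), so only the NULL return must be handled.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	my_rewrite_header(skb);
 */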
2058 
2059 /**
2060  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
2062  *
2063  * Return: true/false
2064  */
2065 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
2066 {
2067 	return skb_cloned(skb);
2068 }
2069 
2070 /**
2071  * __qdf_nbuf_pool_init() - init pool
2072  * @net: net handle
2073  *
2074  * Return: QDF status
2075  */
2076 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
2077 {
2078 	return QDF_STATUS_SUCCESS;
2079 }
2080 
2081 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
2083  */
2084 #define __qdf_nbuf_pool_delete(osdev)
2085 
2086 /**
2087  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2088  *        release the skb.
2089  * @skb: sk buff
2090  * @headroom: size of headroom
2091  * @tailroom: size of tailroom
2092  *
2093  * Return: skb or NULL
2094  */
2095 static inline struct sk_buff *
2096 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2097 {
2098 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2099 		return skb;
2100 
2101 	dev_kfree_skb_any(skb);
2102 	return NULL;
2103 }
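
/*
 * Illustrative usage sketch (hypothetical MY_* constants): reserve room
 * for an encapsulation header and a trailer in a single reallocation;
 * the skb is consumed (freed) on failure.
 *
 *	skb = __qdf_nbuf_expand(skb, MY_ENCAP_LEN, MY_TRAILER_LEN);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	skb_push(skb, MY_ENCAP_LEN);
 */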
2104 
2105 /**
2106  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2107  * @buf: Network buf instance
2108  * @headroom: Additional headroom to be added
2109  * @tailroom: Additional tailroom to be added
2110  *
2111  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2112  *	or NULL if there is no memory
2113  */
2114 static inline struct sk_buff *
2115 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2116 {
2117 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2118 }
2119 
2120 /**
2121  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2122  * @buf: Network buf instance
2123  *
2124  * Return: True, if buf has frag_list else return False
2125  */
2126 static inline bool
2127 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2128 {
2129 	return skb_has_frag_list(buf);
2130 }
2131 
2132 /**
2133  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2134  * @buf: Network buf instance
2135  *
2136  * Return: Network buf instance
2137  */
2138 static inline struct sk_buff *
2139 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2140 {
2141 	struct sk_buff *list;
2142 
2143 	if (!__qdf_nbuf_has_fraglist(buf))
2144 		return NULL;
2145 
2146 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2147 		;
2148 
2149 	return list;
2150 }
2151 
2152 /**
2153  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2154  * @buf: Network buf instance
2155  *
2156  * Return: void
2157  */
2158 static inline void
2159 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2160 {
2161 	struct sk_buff *list;
2162 
2163 	skb_walk_frags(buf, list)
2164 		skb_get(list);
2165 }
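
/*
 * Illustrative sketch (hypothetical my_count_frag_list()): walk the
 * frag_list chain with the same skb_walk_frags() iterator used above,
 * e.g. to count the chained segments of an AMSDU-style buffer.
 *
 *	static int my_count_frag_list(struct sk_buff *skb)
 *	{
 *		struct sk_buff *list;
 *		int n = 0;
 *
 *		skb_walk_frags(skb, list)
 *			n++;
 *		return n;
 *	}
 */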
2166 
/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: Pointer to network buffer
 * @hdr_off: pointer used to return the checksum header offset
 * @where: pointer used to return the checksum location
 *
 * Not supported in this implementation; asserts when called.
 *
 * Return: true/false
 */
2172 static inline bool
2173 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2174 			 uint8_t **where)
2175 {
2176 	qdf_assert(0);
2177 	return false;
2178 }
2179 
2180 /**
2181  * __qdf_nbuf_reset_ctxt() - mem zero control block
2182  * @nbuf: buffer
2183  *
2184  * Return: none
2185  */
2186 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2187 {
2188 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2189 }
2190 
2191 /**
2192  * __qdf_nbuf_network_header() - get network header
2193  * @buf: buffer
2194  *
2195  * Return: network header pointer
2196  */
2197 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2198 {
2199 	return skb_network_header(buf);
2200 }
2201 
2202 /**
2203  * __qdf_nbuf_transport_header() - get transport header
2204  * @buf: buffer
2205  *
2206  * Return: transport header pointer
2207  */
2208 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2209 {
2210 	return skb_transport_header(buf);
2211 }
2212 
2213 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS),
 *  passed as part of network buffer by network stack
2216  * @skb: sk buff
2217  *
2218  * Return: TCP MSS size
2219  *
2220  */
2221 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2222 {
2223 	return skb_shinfo(skb)->gso_size;
2224 }
2225 
2226 /**
2227  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2228  * @nbuf: sk buff
2229  *
2230  * Return: none
2231  */
2232 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2233 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
2240 static inline void *
2241 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2242 {
2243 	return (void *)nbuf->cb;
2244 }
2245 
2246 /**
2247  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2248  * @skb: sk buff
2249  *
2250  * Return: head size
2251  */
2252 static inline size_t
2253 __qdf_nbuf_headlen(struct sk_buff *skb)
2254 {
2255 	return skb_headlen(skb);
2256 }
2257 
2258 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
2261  *
2262  * Return: true/false
2263  */
2264 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2265 {
2266 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2267 }
2268 
2269 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
2272  *
2273  * Return: true/false
2274  */
2275 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2276 {
2277 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2278 }
2279 
2280 /**
2281  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2282  * @skb: sk buff
2283  *
2284  * Return: size of l2+l3+l4 header length
2285  */
2286 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2287 {
2288 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2289 }
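
/*
 * Illustrative sketch (hypothetical my_tcp_payload_len(); assumes the
 * skb actually carries a TCP packet): the TCP payload size follows
 * directly from the total length minus the l2+l3+l4 header length.
 *
 *	static size_t my_tcp_payload_len(struct sk_buff *skb)
 *	{
 *		return skb->len - __qdf_nbuf_l2l3l4_hdr_len(skb);
 *	}
 */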
2290 
2291 /**
2292  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2293  * @skb: sk buff
2294  *
2295  * Return: size of TCP header length
2296  */
2297 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2298 {
2299 	return tcp_hdrlen(skb);
2300 }
2301 
/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb);
}
2315 
2316 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2319  *
2320  * Return: TCP sequence number
2321  */
2322 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2323 {
2324 	return ntohl(tcp_hdr(skb)->seq);
2325 }
2326 
2327 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2330  *
2331  * Return: data pointer to typecast into your priv structure
2332  */
2333 static inline char *
2334 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2335 {
2336 	return &skb->cb[8];
2337 }
2338 
2339 /**
2340  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2341  * @buf: Pointer to nbuf
2342  *
2343  * Return: None
2344  */
2345 static inline void
2346 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2347 {
2348 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2349 }
2350 
2351 /**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
2355  * @queue_id: Queue id
2356  *
2357  * Return: void
2358  */
2359 static inline void
2360 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2361 {
2362 	skb_record_rx_queue(skb, queue_id);
2363 }
2364 
2365 /**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
2369  *
2370  * Return: Queue mapping
2371  */
2372 static inline uint16_t
2373 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2374 {
2375 	return skb->queue_mapping;
2376 }
2377 
2378 /**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: none
 */
2385 static inline void
2386 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2387 {
2388 	skb_set_queue_mapping(skb, val);
2389 }
2390 
2391 /**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
2395  *
2396  * Return: void
2397  */
2398 static inline void
2399 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2400 {
2401 	__net_timestamp(skb);
2402 }
2403 
2404 /**
 * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
2408  *
2409  * Return: timestamp stored in skb in ms
2410  */
2411 static inline uint64_t
2412 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2413 {
2414 	return ktime_to_ms(skb_get_ktime(skb));
2415 }
2416 
2417 /**
 * __qdf_nbuf_get_timestamp_us() - get the timestamp for frame
 * @skb: sk buff
2421  *
2422  * Return: timestamp stored in skb in us
2423  */
2424 static inline uint64_t
2425 __qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
2426 {
2427 	return ktime_to_us(skb_get_ktime(skb));
2428 }
2429 
2430 /**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
2434  *
2435  * Return: time difference in ms
2436  */
2437 static inline uint64_t
2438 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2439 {
2440 	return ktime_to_ms(net_timedelta(skb->tstamp));
2441 }
2442 
2443 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2449  */
2450 static inline uint64_t
2451 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2452 {
2453 	return ktime_to_us(net_timedelta(skb->tstamp));
2454 }
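
/*
 * Illustrative sketch: stamp a frame when it is queued, then read back
 * the elapsed host-side time when it completes.
 *
 *	__qdf_nbuf_set_timestamp(skb);
 *	...
 *	uint64_t lat_ms = __qdf_nbuf_get_timedelta_ms(skb);
 */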
2455 
2456 /**
2457  * __qdf_nbuf_orphan() - orphan a nbuf
2458  * @skb: sk buff
2459  *
2460  * If a buffer currently has an owner then we call the
2461  * owner's destructor function
2462  *
2463  * Return: void
2464  */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}
2469 
2470 /**
2471  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2472  * head pointer to end pointer
2473  * @nbuf: qdf_nbuf_t
2474  *
2475  * Return: size of network buffer from head pointer to end
2476  * pointer
2477  */
2478 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2479 {
2480 	return skb_end_offset(nbuf);
2481 }
2482 
2483 /**
2484  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2485  * including the header and variable data area
2486  * @skb: sk buff
2487  *
2488  * Return: size of network buffer
2489  */
2490 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2491 {
2492 	return skb->truesize;
2493 }
2494 
2495 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2496 /**
2497  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2498  * from the total skb mem and DP tx/rx skb mem
2499  * @nbytes: number of bytes
2500  * @dir: direction
2501  * @is_mapped: is mapped or unmapped memory
2502  *
2503  * Return: none
2504  */
2505 static inline void __qdf_record_nbuf_nbytes(
2506 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2507 {
2508 	if (is_mapped) {
2509 		if (dir == QDF_DMA_TO_DEVICE) {
2510 			qdf_mem_dp_tx_skb_cnt_inc();
2511 			qdf_mem_dp_tx_skb_inc(nbytes);
2512 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2513 			qdf_mem_dp_rx_skb_cnt_inc();
2514 			qdf_mem_dp_rx_skb_inc(nbytes);
2515 		}
2516 		qdf_mem_skb_total_inc(nbytes);
2517 	} else {
2518 		if (dir == QDF_DMA_TO_DEVICE) {
2519 			qdf_mem_dp_tx_skb_cnt_dec();
2520 			qdf_mem_dp_tx_skb_dec(nbytes);
2521 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2522 			qdf_mem_dp_rx_skb_cnt_dec();
2523 			qdf_mem_dp_rx_skb_dec(nbytes);
2524 		}
2525 		qdf_mem_skb_total_dec(nbytes);
2526 	}
2527 }
2528 
2529 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2530 static inline void __qdf_record_nbuf_nbytes(
2531 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2532 {
2533 }
2534 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2535 
2536 /**
2537  * __qdf_nbuf_map_nbytes_single() - map nbytes
2538  * @osdev: os device
2539  * @buf: buffer
2540  * @dir: direction
2541  * @nbytes: number of bytes
2542  *
2543  * Return: QDF_STATUS
2544  */
2545 #ifdef A_SIMOS_DEVHOST
2546 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2547 		qdf_device_t osdev, struct sk_buff *buf,
2548 		qdf_dma_dir_t dir, int nbytes)
2549 {
2550 	qdf_dma_addr_t paddr;
2551 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
2553 	return QDF_STATUS_SUCCESS;
2554 }
2555 #else
2556 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2557 		qdf_device_t osdev, struct sk_buff *buf,
2558 		qdf_dma_dir_t dir, int nbytes)
2559 {
2560 	qdf_dma_addr_t paddr;
2561 	QDF_STATUS ret;
2562 
2563 	/* assume that the OS only provides a single fragment */
2564 	QDF_NBUF_CB_PADDR(buf) = paddr =
2565 		dma_map_single(osdev->dev, buf->data,
2566 			       nbytes, __qdf_dma_dir_to_os(dir));
2567 	ret =  dma_mapping_error(osdev->dev, paddr) ?
2568 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2569 	if (QDF_IS_STATUS_SUCCESS(ret))
2570 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2571 					 dir, true);
2572 	return ret;
2573 }
2574 #endif
2575 /**
2576  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2577  * @osdev: os device
2578  * @buf: buffer
2579  * @dir: direction
2580  * @nbytes: number of bytes
2581  *
2582  * Return: none
2583  */
2584 #if defined(A_SIMOS_DEVHOST)
2585 static inline void
2586 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2587 			       qdf_dma_dir_t dir, int nbytes)
2588 {
2589 }
2590 
2591 #else
2592 static inline void
2593 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2594 			       qdf_dma_dir_t dir, int nbytes)
2595 {
2596 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2597 
2598 	if (qdf_likely(paddr)) {
2599 		__qdf_record_nbuf_nbytes(
2600 			__qdf_nbuf_get_end_offset(buf), dir, false);
2601 		dma_unmap_single(osdev->dev, paddr, nbytes,
2602 				 __qdf_dma_dir_to_os(dir));
2603 		return;
2604 	}
2605 }
2606 #endif
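
/*
 * Illustrative usage sketch (hypothetical my_tx_one()): map and unmap
 * must stay symmetric, with the same direction and length on both
 * sides; the mapped address is published via QDF_NBUF_CB_PADDR().
 *
 *	static QDF_STATUS my_tx_one(qdf_device_t osdev, struct sk_buff *skb)
 *	{
 *		QDF_STATUS status;
 *
 *		status = __qdf_nbuf_map_nbytes_single(osdev, skb,
 *						      QDF_DMA_TO_DEVICE,
 *						      skb->len);
 *		if (!QDF_IS_STATUS_SUCCESS(status))
 *			return status;
 *		// ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *		__qdf_nbuf_unmap_nbytes_single(osdev, skb,
 *					       QDF_DMA_TO_DEVICE, skb->len);
 *		return QDF_STATUS_SUCCESS;
 *	}
 */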
2607 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the list head
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: skb, or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_qlen() - return the skb list length
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: list length
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the list tail
 * @skb_queue_head: Pointer to skb list head
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list head
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_purge() - purge (free) all skbs on the list
 * @skb_queue_head: Pointer to skb list head
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_empty() - check whether the skb list is empty
 * @nbuf_queue_head: Pointer to skb list head
 *
 * Return: non-zero if the list is empty, 0 otherwise
 */
static inline
int __qdf_nbuf_queue_empty(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	return skb_queue_empty(nbuf_queue_head);
}
2644 
2645 /**
2646  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2648  *
2649  * Return: void
2650  */
2651 static inline
2652 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2653 {
2654 	spin_lock_bh(&skb_queue_head->lock);
2655 }
2656 
2657 /**
2658  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2660  *
2661  * Return: void
2662  */
2663 static inline
2664 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2665 {
2666 	spin_unlock_bh(&skb_queue_head->lock);
2667 }
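
/*
 * Illustrative sketch (hypothetical queue q): the sk_buff_head based
 * queue carries its own spinlock, so pair the lock/unlock helpers
 * around the unlocked (double-underscore) list operations.
 *
 *	__qdf_nbuf_queue_head_lock(&q);
 *	__qdf_nbuf_dev_queue_head(&q, skb);
 *	__qdf_nbuf_queue_head_unlock(&q);
 */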
2668 
2669 /**
2670  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2671  * @nbuf: qdf_nbuf_t
2672  * @idx: Index for which frag size is requested
2673  *
2674  * Return: Frag size
2675  */
2676 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2677 							   uint8_t idx)
2678 {
2679 	unsigned int size = 0;
2680 
2681 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2682 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2683 	return size;
2684 }
2685 
2686 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
2688  * @nbuf: qdf_nbuf_t
2689  * @idx: Index for which frag address is requested
2690  *
2691  * Return: Frag address in success, else NULL
2692  */
2693 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2694 						    uint8_t idx)
2695 {
2696 	__qdf_frag_t frag_addr = NULL;
2697 
2698 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2699 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2700 	return frag_addr;
2701 }
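
/*
 * Illustrative sketch (hypothetical my_frags_total()): iterate the page
 * frags of an nbuf with the two accessors above, e.g. to sum their
 * sizes.
 *
 *	static unsigned int my_frags_total(__qdf_nbuf_t nbuf)
 *	{
 *		unsigned int total = 0;
 *		uint8_t i;
 *
 *		for (i = 0; i < __qdf_nbuf_get_nr_frags(nbuf); i++)
 *			total += __qdf_nbuf_get_frag_size_by_idx(nbuf, i);
 *		return total;
 *	}
 */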
2702 
2703 /**
2704  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2705  * @nbuf: qdf_nbuf_t
2706  * @idx: Frag index
2707  * @size: Size by which frag_size needs to be increased/decreased
2708  *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2711 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2712 						 int size,
2713 						 unsigned int truesize)
2714 {
2715 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2716 }
2717 
2718 /**
2719  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2720  *          and adjust length by size.
2721  * @nbuf: qdf_nbuf_t
2722  * @idx: Frag index
2723  * @offset: Frag page offset should be moved by offset.
2724  *      +Ve - Move offset forward.
2725  *      -Ve - Move offset backward.
2726  *
2727  * Return: QDF_STATUS
2728  */
2729 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2730 					    int offset);
2731 
2732 /**
2733  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
2734  * @nbuf: nbuf pointer
2735  * @idx: frag idx need to be removed
2736  * @truesize: truesize of frag
2737  *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

2741 /**
2742  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
2743  * @buf: Frag pointer needs to be added in nbuf frag
2744  * @nbuf: qdf_nbuf_t where frag will be added
2745  * @offset: Offset in frag to be added to nbuf_frags
2746  * @frag_len: Frag length
2747  * @truesize: truesize
2748  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per the below condition:
2750  *      1. False: If this frag is being added in any nbuf
2751  *              for the first time after allocation.
2752  *      2. True: If frag is already attached part of any
2753  *              nbuf.
2754  *
 * It takes a frag reference count based on the boolean flag take_frag_ref.
 *
 * Return: none
 */
2757 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2758 			    int offset, int frag_len,
2759 			    unsigned int truesize, bool take_frag_ref);
2760 
/**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: Frag address to take a reference on
 *
 * Return: void
 */
void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2767 
2768 /**
2769  * __qdf_nbuf_set_mark() - Set nbuf mark
2770  * @buf: Pointer to nbuf
2771  * @mark: Value to set mark
2772  *
2773  * Return: None
2774  */
2775 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2776 {
2777 	buf->mark = mark;
2778 }
2779 
2780 /**
2781  * __qdf_nbuf_get_mark() - Get nbuf mark
2782  * @buf: Pointer to nbuf
2783  *
2784  * Return: Value of mark
2785  */
2786 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2787 {
2788 	return buf->mark;
2789 }
2790 
2791 /**
2792  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2793  * the data pointer to the end pointer
2794  * @nbuf: qdf_nbuf_t
2795  *
2796  * Return: size of skb from data pointer to end pointer
2797  */
2798 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2799 {
2800 	return (skb_end_pointer(nbuf) - nbuf->data);
2801 }
2802 
2803 /**
 * __qdf_nbuf_set_data_len() - Set the data_len of the nbuf
 * @nbuf: qdf_nbuf_t
 * @len: value of data_len to be set
 *
 * Return: value of data_len that was set
2808  */
2809 static inline
2810 qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
2811 {
2812 	return nbuf->data_len = len;
2813 }
2814 
2815 /**
2816  * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
2817  * @nbuf: qdf_nbuf_t
2818  *
2819  * Return: value of data_len
2820  */
2821 static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
2822 {
2823 	return nbuf->data_len;
2824 }
2825 
2826 /**
2827  * __qdf_nbuf_set_hash() - set the hash of the buf
2828  * @buf: Network buf instance
2829  * @len: len to be set
2830  *
2831  * Return: None
2832  */
2833 static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
2834 {
2835 	buf->hash = len;
2836 }
2837 
2838 /**
2839  * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
2840  * @buf: Network buf instance
2841  * @len: len to be set
2842  *
2843  * Return: None
2844  */
2845 static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
2846 {
2847 	buf->sw_hash = len;
2848 }
2849 
2850 /**
2851  * __qdf_nbuf_set_csum_start() - set the csum start of the buf
2852  * @buf: Network buf instance
2853  * @len: len to be set
2854  *
2855  * Return: None
2856  */
2857 static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
2858 {
2859 	buf->csum_start = len;
2860 }
2861 
2862 /**
2863  * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
2864  * @buf: Network buf instance
2865  * @len: len to be set
2866  *
2867  * Return: None
2868  */
2869 static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
2870 {
2871 	buf->csum_offset = len;
2872 }
2873 
2874 /**
2875  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2876  * @skb: Pointer to network buffer
2877  *
2878  * Return: Return the number of gso segments
2879  */
2880 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2881 {
2882 	return skb_shinfo(skb)->gso_segs;
2883 }
2884 
2885 /**
2886  * __qdf_nbuf_set_gso_segs() - set the number of gso segments
2887  * @skb: Pointer to network buffer
2888  * @val: val to be set
2889  *
2890  * Return: None
2891  */
2892 static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
2893 {
2894 	skb_shinfo(skb)->gso_segs = val;
2895 }
2896 
2897 /**
2898  * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
2899  * @skb: Pointer to network buffer
2900  *
2901  * Return: None
2902  */
2903 static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
2904 {
2905 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
2906 }
2907 
2908 /**
2909  * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
2910  * @skb: Pointer to network buffer
2911  *
2912  * Return: None
2913  */
2914 static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
2915 {
2916 	skb->ip_summed = CHECKSUM_PARTIAL;
2917 }
2918 
2919 /**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
2924  */
2925 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2926 {
2927 	return skb_shinfo(skb)->gso_size;
2928 }
2929 
2930 /**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso size to be set
 *
 * Return: none
2935  */
2936 static inline void
2937 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
2938 {
2939 	skb_shinfo(skb)->gso_size = val;
2940 }
2941 
2942 /**
2943  * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
2945  *
2946  * This function is called to free the skb on failure cases
2947  *
2948  * Return: None
2949  */
2950 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
2951 {
2952 	kfree_skb(skb);
2953 }
2954 
2955 /**
2956  * __qdf_nbuf_dev_kfree_list() - Free nbuf list using dev based os call
 * @nbuf_queue_head: Pointer to nbuf queue head
2958  *
2959  * This function is called to free the nbuf list on failure cases
2960  *
2961  * Return: None
2962  */
2963 void
2964 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head);
2965 
2966 /**
2967  * __qdf_nbuf_dev_queue_head() - queue a buffer using dev at the list head
 * @nbuf_queue_head: Pointer to skb list head
2969  * @buff: Pointer to nbuf
2970  *
2971  * This function is called to queue buffer at the skb list head
2972  *
2973  * Return: None
2974  */
2975 static inline void
2976 __qdf_nbuf_dev_queue_head(__qdf_nbuf_queue_head_t *nbuf_queue_head,
2977 			  __qdf_nbuf_t buff)
2978 {
2979 	 __skb_queue_head(nbuf_queue_head, buff);
2980 }
2981 
2982 /**
2983  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
2985  *
2986  * This function is called to free the skb on failure cases
2987  *
2988  * Return: None
2989  */
2990 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
2991 {
2992 	dev_kfree_skb(skb);
2993 }
2994 
2995 /**
2996  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
2998  *
2999  * Return: TRUE if skb pkt type is mcast
3000  *         FALSE if not
3001  */
3002 static inline
3003 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
3004 {
3005 	return skb->pkt_type == PACKET_MULTICAST;
3006 }
3007 
3008 /**
3009  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
3010  * @buf: Network buffer
3011  *
3012  * Return: TRUE if skb pkt type is mcast
3013  *         FALSE if not
3014  */
3015 static inline
3016 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
3017 {
3018 	return skb->pkt_type == PACKET_BROADCAST;
3019 }
3020 
3021 /**
 * __qdf_nbuf_set_dev() - set dev in network buffer
 * @skb: Pointer to network buffer
 * @dev: net_device to be set in the network buffer
3025  *
3026  * Return: void
3027  */
3028 static inline
3029 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
3030 {
3031 	skb->dev = dev;
3032 }
3033 
3034 /**
3035  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
 * @skb: Pointer to network buffer
3037  *
3038  * Return: dev mtu value in nbuf
3039  */
3040 static inline
3041 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
3042 {
3043 	return skb->dev->mtu;
3044 }
3045 
3046 /**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using eth_type_trans OS API
 * @skb: Pointer to network buffer
3049  *
3050  * Return: None
3051  */
3052 static inline
3053 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
3054 {
3055 	skb->protocol = eth_type_trans(skb, skb->dev);
3056 }
3057 
/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as qdf_ktime_t object
 *
 * Return: time delta as qdf_ktime_t object
 */
3064 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
3065 {
3066 	return net_timedelta(t);
3067 }
3068 
3069 #ifdef CONFIG_NBUF_AP_PLATFORM
3070 #include <i_qdf_nbuf_w.h>
3071 #else
3072 #include <i_qdf_nbuf_m.h>
3073 #endif
#endif /* _I_QDF_NBUF_H */