/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf API's.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* Since commit
 *  baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
 *
 * the function netif_rx() can be used in preemptible/thread context as
 * well as in interrupt context.
 *
 * Use netif_rx().
 */
#define netif_rx_ni(skb) netif_rx(skb)
#endif

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both packets and data, so sk_buff is
 * used to represent the nbuf as well.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

/**
 * typedef __qdf_nbuf_shared_info_t - abstraction for skb_shared_info linux struct
 *
 * This is used for skb shared info via linux skb shinfo APIs
 */
typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
#define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
#define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

#define IEEE80211_RADIOTAP_EXT1_USIG	1
#define IEEE80211_RADIOTAP_EXT1_EHT	2

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/* TCP-related masks */
#define QDF_NBUF_PKT_TCPOP_FIN			0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
#define QDF_NBUF_PKT_TCPOP_RST			0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for the rcvd 5-tuple
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *					       sw exception bit from ring desc
 * @rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx_ctx_id: Rx context id
 * @num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.is_critical: flag indicating a critical frame
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag (packet is private)
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: Flag to indicate send this packet to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 lmac_id:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					/* used only for hl */
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros to qdf_nbuf_cb.
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros,
 *       as illustrated in the example below.
 */

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
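
/*
 * Example (illustrative sketch only; 'nbuf' stands for any valid,
 * already-mapped skb): the same macro reads or writes the cb field
 * depending on which side of the assignment it appears on.
 *
 *	qdf_dma_addr_t pa;
 *
 *	pa = QDF_NBUF_CB_PADDR(nbuf);		(R-value: "get")
 *	QDF_NBUF_CB_PADDR(nbuf) = pa;		(L-value: "set")
 */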

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)

#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
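
/*
 * Example (illustrative sketch; 'nbuf' and the desc_* values are
 * hypothetical): push one extra tx fragment (e.g. an HTC/HTT
 * descriptor) and read it back. After the push,
 * __qdf_nbuf_get_num_frags() reports 2: the extra fragment plus the
 * single OS-provided fragment.
 *
 *	__qdf_nbuf_frag_push_head(nbuf, desc_len, desc_vaddr, desc_paddr);
 *	vaddr = __qdf_nbuf_get_frag_vaddr(nbuf, 0);	(extra frag)
 *	len   = __qdf_nbuf_get_frag_len(nbuf, 1);	(skb->len)
 */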

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in
 * the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);

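/**
 * __qdf_nbuf_alloc_simple() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * Simplified variant of __qdf_nbuf_alloc() that takes no alignment,
 * reserve or priority parameters.
 *
 * Return: nbuf or %NULL if no memory
 */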
__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocate skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf, aligns it if needed, and reserves some headroom
 * space after the alignment. The nbuf is not allocated from the skb recycler
 * pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether the caller is
 * in interrupt context (e.g. using "in_irq() || irqs_disabled()") and only
 * use GFP_ATOMIC there, falling back to GFP_KERNEL otherwise.
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
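
/*
 * Example (illustrative): kernel APIs typically return 0 on success or a
 * negative errno, which maps directly through __qdf_to_status():
 *
 *	err = pskb_expand_head(skb, 0, len, GFP_ATOMIC);  (0 or -ENOMEM)
 *	return __qdf_to_status(err);	(QDF_STATUS_SUCCESS / E_NOMEM)
 */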

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If an extra tx
 * fragment is present, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}
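
/*
 * Worked example (hypothetical numbers): for an skb with skb->len == 100
 * that carries one 16-byte extra tx fragment pushed via
 * __qdf_nbuf_frag_push_head(), __qdf_nbuf_len() returns 100 + 16 = 116.
 */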

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the data of the new buf (src) is copied into the older
 * one (dst). It is the caller's responsibility to free the src skb; it is
 * not released here, even on failure.
 *
 * Return: QDF_STATUS of the concatenation
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
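
/*
 * Example (illustrative sketch; both buffers are assumed valid): append
 * the payload of 'src' to 'dst', then release 'src', which the caller
 * still owns after the call.
 *
 *	status = __qdf_nbuf_cat(dst, src);
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		__qdf_nbuf_free(src);	(src data now lives in dst)
 */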

/*
 * nbuf manipulation routines
 */

/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
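
/*
 * Example (illustrative; 'trailer' is a hypothetical 4-byte value):
 * __qdf_nbuf_put_tail() grows the buffer, expanding the head when the
 * tailroom is insufficient, and returns the old tail for writing. Note
 * that on failure the skb has already been freed by this helper.
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (tail)
 *		memcpy(tail, &trailer, 4);
 */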

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
 * @skb: sk buff
 *
 * This API returns the total number of fragments from the fraglist.
 *
 * Return: total number of fragments
 */
static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
{
	uint32_t num_frag = 0;
	struct sk_buff *list = NULL;

	num_frag = skb_shinfo(skb)->nr_frags;
	skb_walk_frags(skb, list)
		num_frag += skb_shinfo(list)->nr_frags;

	return num_frag;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

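/**
 * __qdf_nbuf_data_addr() - return the address of the skb->data field
 * @skb: Pointer to network buffer
 *
 * Note: this is the address of the pointer field itself, not of the data
 * it points to; see __qdf_nbuf_data() for the latter.
 *
 * Return: address of skb->data
 */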
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0).
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
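
/*
 * Example (illustrative sketch): stitch a chain of continuation segments
 * onto a head segment, where 'ext_total_len' is the caller's precomputed
 * sum of the lengths of all buffers in 'ext_list':
 *
 *	__qdf_nbuf_append_ext_list(head, ext_list, ext_total_len);
 *	(head->len now covers the head data plus all extensions)
 */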

/**
 * __qdf_nbuf_get_shinfo() - return the shared info of the skb
 * @head_buf: Pointer to network buffer
 *
 * Return: skb shared info from head buf
 */
static inline
struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
{
	return skb_shinfo(head_buf);
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception, where segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjusts the tail
 * @skb: Pointer to network buffer
 * @len: Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				   "SKB tailroom is less than requested length."
				   " tail-room: %u, len: %u, skb->len: %u",
				   skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

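/**
 * __qdf_nbuf_is_tso() - check whether the skb requires TSO segmentation
 * @skb: Pointer to network buffer
 *
 * Return: true if the skb is a TCPv4/TCPv6 GSO packet, false otherwise
 */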
static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a
 * length and the frame is 802.3; otherwise it is Ethernet Type II
 * (RFC 894). Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
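
/*
 * Example (illustrative sketch; the variable names are hypothetical and
 * the htt_pkt_type_* values come from the HTT headers): classify an
 * outgoing frame before handing it to the target.
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */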

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}
1710 
1711 /**
 * typedef __qdf_nbuf_queue_t - network buffer queue
1713  * @head: Head pointer
1714  * @tail: Tail pointer
1715  * @qlen: Queue length
1716  */
1717 typedef struct __qdf_nbuf_qhead {
1718 	struct sk_buff *head;
1719 	struct sk_buff *tail;
1720 	unsigned int qlen;
1721 } __qdf_nbuf_queue_t;
1722 
1723 /******************Functions *************/
1724 
1725 /**
 * __qdf_nbuf_queue_init() - initialize the queue head
1727  * @qhead: Queue head
1728  *
1729  * Return: QDF status
1730  */
1731 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1732 {
1733 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1734 	return QDF_STATUS_SUCCESS;
1735 }
1736 
1737 /**
1738  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1739  * @qhead: Queue head
1740  * @skb: Pointer to network buffer
1741  *
1742  * This is a lockless version, driver must acquire locks if it
1743  * needs to synchronize
1744  *
1745  * Return: none
1746  */
1747 static inline void
1748 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1749 {
	skb->next = NULL;       /* nullify the next ptr */
1751 
1752 	if (!qhead->head)
1753 		qhead->head = skb;
1754 	else
1755 		qhead->tail->next = skb;
1756 
1757 	qhead->tail = skb;
1758 	qhead->qlen++;
1759 }
1760 
1761 /**
1762  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1763  * @dest: target netbuf queue
1764  * @src:  source netbuf queue
1765  *
1766  * Return: target netbuf queue
1767  */
1768 static inline __qdf_nbuf_queue_t *
1769 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1770 {
1771 	if (!dest)
1772 		return NULL;
1773 	else if (!src || !(src->head))
1774 		return dest;
1775 
1776 	if (!(dest->head))
1777 		dest->head = src->head;
1778 	else
1779 		dest->tail->next = src->head;
1780 
1781 	dest->tail = src->tail;
1782 	dest->qlen += src->qlen;
1783 	return dest;
1784 }
1785 
1786 /**
1787  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
1788  * @qhead: Queue head
1789  * @skb: Pointer to network buffer
1790  *
1791  * This is a lockless version, driver must acquire locks if it needs to
1792  * synchronize
1793  *
1794  * Return: none
1795  */
1796 static inline void
1797 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1798 {
1799 	if (!qhead->head) {
		/* Empty queue: the tail pointer must also be updated */
1801 		qhead->tail = skb;
1802 	}
1803 	skb->next = qhead->head;
1804 	qhead->head = skb;
1805 	qhead->qlen++;
1806 }
1807 
/**
 * __qdf_nbuf_queue_remove_last() - remove an skb from the tail of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp_tail, node = NULL;

	if (qhead->head) {
		tmp_tail = qhead->tail;
		node = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			/* walk to the node just before the current tail */
			while (tmp_tail != node->next)
				node = node->next;
			qhead->tail = node;
			node->next = NULL;
		}
		qhead->qlen--;
		return tmp_tail;
	}
	return node;
}
1829 
1830 /**
1831  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1832  * @qhead: Queue head
1833  *
1834  * This is a lockless version. Driver should take care of the locks
1835  *
1836  * Return: skb or NULL
1837  */
1838 static inline
1839 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1840 {
1841 	__qdf_nbuf_t tmp = NULL;
1842 
1843 	if (qhead->head) {
1844 		qhead->qlen--;
1845 		tmp = qhead->head;
1846 		if (qhead->head == qhead->tail) {
1847 			qhead->head = NULL;
1848 			qhead->tail = NULL;
1849 		} else {
1850 			qhead->head = tmp->next;
1851 		}
1852 		tmp->next = NULL;
1853 	}
1854 	return tmp;
1855 }
1856 
1857 /**
1858  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1859  * @qhead: head of queue
1860  *
1861  * Return: NULL if the queue is empty
1862  */
1863 static inline struct sk_buff *
1864 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1865 {
1866 	return qhead->head;
1867 }
1868 
1869 /**
1870  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1871  * @qhead: head of queue
1872  *
1873  * Return: NULL if the queue is empty
1874  */
1875 static inline struct sk_buff *
1876 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1877 {
1878 	return qhead->tail;
1879 }
1880 
1881 /**
1882  * __qdf_nbuf_queue_len() - return the queue length
1883  * @qhead: Queue head
1884  *
1885  * Return: Queue length
1886  */
1887 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1888 {
1889 	return qhead->qlen;
1890 }
1891 
1892 /**
1893  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1894  * @skb: Pointer to network buffer
1895  *
1896  * This API returns the next skb from packet chain, remember the skb is
1897  * still in the queue
1898  *
1899  * Return: NULL if no packets are there
1900  */
1901 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1902 {
1903 	return skb->next;
1904 }
1905 
1906 /**
1907  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1908  * @qhead: Queue head
1909  *
1910  * Return: true if length is 0 else false
1911  */
1912 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1913 {
1914 	return qhead->qlen == 0;
1915 }
1916 
1917 /*
1918  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1919  * Because the queue head will most likely put in some structure,
1920  * we don't use pointer type as the definition.
1921  */
1922 
1923 /*
1924  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1925  * Because the queue head will most likely put in some structure,
1926  * we don't use pointer type as the definition.
1927  */
1928 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: Pointer to network buffer
 * @flag: flag value
 *
 * No-op in the Linux implementation.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1933 
1934 /**
1935  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
1936  *        expands the headroom
1937  *        in the data region. In case of failure the skb is released.
1938  * @skb: sk buff
1939  * @headroom: size of headroom
1940  *
1941  * Return: skb or NULL
1942  */
1943 static inline struct sk_buff *
1944 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1945 {
1946 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1947 		dev_kfree_skb_any(skb);
1948 		skb = NULL;
1949 	}
1950 	return skb;
1951 }
1952 
1953 /**
1954  * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
1955  *        exapnds the tailroom
1956  *        in data region. In case of failure it releases the skb.
1957  * @skb: sk buff
1958  * @tailroom: size of tailroom
1959  *
1960  * Return: skb or NULL
1961  */
1962 static inline struct sk_buff *
1963 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1964 {
1965 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1966 		return skb;
1967 	/**
1968 	 * unlikely path
1969 	 */
1970 	dev_kfree_skb_any(skb);
1971 	return NULL;
1972 }
1973 
1974 /**
1975  * __qdf_nbuf_linearize() - skb linearize
1976  * @skb: sk buff
1977  *
1978  * create a version of the specified nbuf whose contents
1979  * can be safely modified without affecting other
1980  * users.If the nbuf is non-linear then this function
1981  * linearize. if unable to linearize returns -ENOMEM on
1982  * success 0 is returned
1983  *
1984  * Return: 0 on Success, -ENOMEM on failure is returned.
1985  */
1986 static inline int
1987 __qdf_nbuf_linearize(struct sk_buff *skb)
1988 {
1989 	return skb_linearize(skb);
1990 }
1991 
1992 /**
1993  * __qdf_nbuf_unshare() - skb unshare
1994  * @skb: sk buff
1995  *
1996  * create a version of the specified nbuf whose contents
1997  * can be safely modified without affecting other
1998  * users.If the nbuf is a clone then this function
1999  * creates a new copy of the data. If the buffer is not
2000  * a clone the original buffer is returned.
2001  *
2002  * Return: skb or NULL
2003  */
2004 static inline struct sk_buff *
2005 __qdf_nbuf_unshare(struct sk_buff *skb)
2006 {
2007 	struct sk_buff *skb_new;
2008 
2009 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
2010 
2011 	skb_new = skb_unshare(skb, GFP_ATOMIC);
2012 	if (skb_new)
2013 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
2014 
2015 	return skb_new;
2016 }
2017 
2018 /**
2019  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
2020  *@buf: sk buff
2021  *
2022  * Return: true/false
2023  */
2024 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
2025 {
2026 	return skb_cloned(skb);
2027 }
2028 
2029 /**
2030  * __qdf_nbuf_pool_init() - init pool
2031  * @net: net handle
2032  *
2033  * Return: QDF status
2034  */
2035 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
2036 {
2037 	return QDF_STATUS_SUCCESS;
2038 }
2039 
2040 /*
2041  * adf_nbuf_pool_delete() implementation - do nothing in linux
2042  */
2043 #define __qdf_nbuf_pool_delete(osdev)
2044 
2045 /**
2046  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2047  *        release the skb.
2048  * @skb: sk buff
2049  * @headroom: size of headroom
2050  * @tailroom: size of tailroom
2051  *
2052  * Return: skb or NULL
2053  */
2054 static inline struct sk_buff *
2055 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2056 {
2057 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2058 		return skb;
2059 
2060 	dev_kfree_skb_any(skb);
2061 	return NULL;
2062 }
2063 
2064 /**
2065  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2066  * @buf: Network buf instance
2067  * @headroom: Additional headroom to be added
2068  * @tailroom: Additional tailroom to be added
2069  *
2070  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2071  *	or NULL if there is no memory
2072  */
2073 static inline struct sk_buff *
2074 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2075 {
2076 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2077 }
2078 
2079 /**
2080  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2081  * @buf: Network buf instance
2082  *
2083  * Return: True, if buf has frag_list else return False
2084  */
2085 static inline bool
2086 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2087 {
2088 	return skb_has_frag_list(buf);
2089 }
2090 
2091 /**
2092  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2093  * @buf: Network buf instance
2094  *
2095  * Return: Network buf instance
2096  */
2097 static inline struct sk_buff *
2098 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2099 {
2100 	struct sk_buff *list;
2101 
2102 	if (!__qdf_nbuf_has_fraglist(buf))
2103 		return NULL;
2104 
2105 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2106 		;
2107 
2108 	return list;
2109 }
2110 
2111 /**
2112  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2113  * @buf: Network buf instance
2114  *
2115  * Return: void
2116  */
2117 static inline void
2118 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2119 {
2120 	struct sk_buff *list;
2121 
2122 	skb_walk_frags(buf, list)
2123 		skb_get(list);
2124 }
2125 
2126 /**
2127  * __qdf_nbuf_tx_cksum_info() - tx checksum info
2128  *
2129  * Return: true/false
2130  */
2131 static inline bool
2132 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2133 			 uint8_t **where)
2134 {
2135 	qdf_assert(0);
2136 	return false;
2137 }
2138 
2139 /**
2140  * __qdf_nbuf_reset_ctxt() - mem zero control block
2141  * @nbuf: buffer
2142  *
2143  * Return: none
2144  */
2145 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2146 {
2147 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2148 }
2149 
2150 /**
2151  * __qdf_nbuf_network_header() - get network header
2152  * @buf: buffer
2153  *
2154  * Return: network header pointer
2155  */
2156 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2157 {
2158 	return skb_network_header(buf);
2159 }
2160 
2161 /**
2162  * __qdf_nbuf_transport_header() - get transport header
2163  * @buf: buffer
2164  *
2165  * Return: transport header pointer
2166  */
2167 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2168 {
2169 	return skb_transport_header(buf);
2170 }
2171 
2172 /**
 *  __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS),
 *  passed as part of the network buffer by the network stack
2175  * @skb: sk buff
2176  *
2177  * Return: TCP MSS size
2178  *
2179  */
2180 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2181 {
2182 	return skb_shinfo(skb)->gso_size;
2183 }
2184 
2185 /**
2186  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2187  * @nbuf: sk buff
2188  *
2189  * Return: none
2190  */
2191 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2192 
2193 /*
2194  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2195  * @nbuf: sk buff
2196  *
2197  * Return: void ptr
2198  */
2199 static inline void *
2200 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2201 {
2202 	return (void *)nbuf->cb;
2203 }
2204 
2205 /**
2206  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2207  * @skb: sk buff
2208  *
2209  * Return: head size
2210  */
2211 static inline size_t
2212 __qdf_nbuf_headlen(struct sk_buff *skb)
2213 {
2214 	return skb_headlen(skb);
2215 }
2216 
2217 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
 * @skb: sk buff
2220  *
2221  * Return: true/false
2222  */
2223 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2224 {
2225 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2226 }
2227 
2228 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
 * @skb: sk buff
2231  *
2232  * Return: true/false
2233  */
2234 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2235 {
2236 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2237 }
2238 
2239 /**
2240  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2241  * @skb: sk buff
2242  *
2243  * Return: size of l2+l3+l4 header length
2244  */
2245 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2246 {
2247 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2248 }
2249 
2250 /**
2251  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2252  * @skb: sk buff
2253  *
2254  * Return: size of TCP header length
2255  */
2256 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2257 {
2258 	return tcp_hdrlen(skb);
2259 }
2260 
2261 /**
2262  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2263  * @buf: sk buff
2264  *
2265  * Return:  true/false
2266  */
2267 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2268 {
2269 	if (skb_is_nonlinear(skb))
2270 		return true;
2271 	else
2272 		return false;
2273 }
2274 
2275 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2278  *
2279  * Return: TCP sequence number
2280  */
2281 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2282 {
2283 	return ntohl(tcp_hdr(skb)->seq);
2284 }
2285 
2286 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2289  *
2290  * Return: data pointer to typecast into your priv structure
2291  */
2292 static inline char *
2293 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2294 {
2295 	return &skb->cb[8];
2296 }
2297 
2298 /**
2299  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2300  * @buf: Pointer to nbuf
2301  *
2302  * Return: None
2303  */
2304 static inline void
2305 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2306 {
2307 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2308 }
2309 
2310 /**
2311  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2312  *
 * @skb: sk buff
2314  * @queue_id: Queue id
2315  *
2316  * Return: void
2317  */
2318 static inline void
2319 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2320 {
2321 	skb_record_rx_queue(skb, queue_id);
2322 }
2323 
2324 /**
2325  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2326  *
 * @skb: sk buff
2328  *
2329  * Return: Queue mapping
2330  */
2331 static inline uint16_t
2332 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2333 {
2334 	return skb->queue_mapping;
2335 }
2336 
2337 /**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 *
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
 */
2344 static inline void
2345 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2346 {
2347 	skb_set_queue_mapping(skb, val);
2348 }
2349 
2350 /**
2351  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2352  *
 * @skb: sk buff
2354  *
2355  * Return: void
2356  */
2357 static inline void
2358 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2359 {
2360 	__net_timestamp(skb);
2361 }
2362 
2363 /**
2364  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2365  *
 * @skb: sk buff
2367  *
2368  * Return: timestamp stored in skb in ms
2369  */
2370 static inline uint64_t
2371 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2372 {
2373 	return ktime_to_ms(skb_get_ktime(skb));
2374 }
2375 
2376 /**
2377  * __qdf_nbuf_get_timestamp_us() - get the timestamp for frame
2378  *
 * @skb: sk buff
2380  *
2381  * Return: timestamp stored in skb in us
2382  */
2383 static inline uint64_t
2384 __qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
2385 {
2386 	return ktime_to_us(skb_get_ktime(skb));
2387 }
2388 
2389 /**
2390  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2391  *
 * @skb: sk buff
2393  *
2394  * Return: time difference in ms
2395  */
2396 static inline uint64_t
2397 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2398 {
2399 	return ktime_to_ms(net_timedelta(skb->tstamp));
2400 }
2401 
2402 /**
2403  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2404  *
 * @skb: sk buff
2406  *
2407  * Return: time difference in micro seconds
2408  */
2409 static inline uint64_t
2410 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2411 {
2412 	return ktime_to_us(net_timedelta(skb->tstamp));
2413 }
2414 
2415 /**
2416  * __qdf_nbuf_orphan() - orphan a nbuf
2417  * @skb: sk buff
2418  *
2419  * If a buffer currently has an owner then we call the
2420  * owner's destructor function
2421  *
2422  * Return: void
2423  */
2424 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2425 {
	skb_orphan(skb);
2427 }
2428 
2429 /**
2430  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2431  * head pointer to end pointer
2432  * @nbuf: qdf_nbuf_t
2433  *
2434  * Return: size of network buffer from head pointer to end
2435  * pointer
2436  */
2437 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2438 {
2439 	return skb_end_offset(nbuf);
2440 }
2441 
2442 /**
2443  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2444  * including the header and variable data area
2445  * @skb: sk buff
2446  *
2447  * Return: size of network buffer
2448  */
2449 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2450 {
2451 	return skb->truesize;
2452 }
2453 
2454 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2455 /**
2456  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2457  * from the total skb mem and DP tx/rx skb mem
2458  * @nbytes: number of bytes
2459  * @dir: direction
2460  * @is_mapped: is mapped or unmapped memory
2461  *
2462  * Return: none
2463  */
2464 static inline void __qdf_record_nbuf_nbytes(
2465 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2466 {
2467 	if (is_mapped) {
2468 		if (dir == QDF_DMA_TO_DEVICE) {
2469 			qdf_mem_dp_tx_skb_cnt_inc();
2470 			qdf_mem_dp_tx_skb_inc(nbytes);
2471 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2472 			qdf_mem_dp_rx_skb_cnt_inc();
2473 			qdf_mem_dp_rx_skb_inc(nbytes);
2474 		}
2475 		qdf_mem_skb_total_inc(nbytes);
2476 	} else {
2477 		if (dir == QDF_DMA_TO_DEVICE) {
2478 			qdf_mem_dp_tx_skb_cnt_dec();
2479 			qdf_mem_dp_tx_skb_dec(nbytes);
2480 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2481 			qdf_mem_dp_rx_skb_cnt_dec();
2482 			qdf_mem_dp_rx_skb_dec(nbytes);
2483 		}
2484 		qdf_mem_skb_total_dec(nbytes);
2485 	}
2486 }
2487 
2488 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2489 static inline void __qdf_record_nbuf_nbytes(
2490 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2491 {
2492 }
2493 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2494 
2495 /**
2496  * __qdf_nbuf_map_nbytes_single() - map nbytes
2497  * @osdev: os device
2498  * @buf: buffer
2499  * @dir: direction
2500  * @nbytes: number of bytes
2501  *
2502  * Return: QDF_STATUS
2503  */
2504 #ifdef A_SIMOS_DEVHOST
2505 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2506 		qdf_device_t osdev, struct sk_buff *buf,
2507 		qdf_dma_dir_t dir, int nbytes)
2508 {
2509 	qdf_dma_addr_t paddr;
2510 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
2512 	return QDF_STATUS_SUCCESS;
2513 }
2514 #else
2515 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2516 		qdf_device_t osdev, struct sk_buff *buf,
2517 		qdf_dma_dir_t dir, int nbytes)
2518 {
2519 	qdf_dma_addr_t paddr;
2520 	QDF_STATUS ret;
2521 
2522 	/* assume that the OS only provides a single fragment */
2523 	QDF_NBUF_CB_PADDR(buf) = paddr =
2524 		dma_map_single(osdev->dev, buf->data,
2525 			       nbytes, __qdf_dma_dir_to_os(dir));
2526 	ret =  dma_mapping_error(osdev->dev, paddr) ?
2527 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2528 	if (QDF_IS_STATUS_SUCCESS(ret))
2529 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2530 					 dir, true);
2531 	return ret;
2532 }
2533 #endif
2534 /**
2535  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2536  * @osdev: os device
2537  * @buf: buffer
2538  * @dir: direction
2539  * @nbytes: number of bytes
2540  *
2541  * Return: none
2542  */
2543 #if defined(A_SIMOS_DEVHOST)
2544 static inline void
2545 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2546 			       qdf_dma_dir_t dir, int nbytes)
2547 {
2548 }
2549 
2550 #else
2551 static inline void
2552 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2553 			       qdf_dma_dir_t dir, int nbytes)
2554 {
2555 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2556 
2557 	if (qdf_likely(paddr)) {
2558 		__qdf_record_nbuf_nbytes(
2559 			__qdf_nbuf_get_end_offset(buf), dir, false);
2560 		dma_unmap_single(osdev->dev, paddr, nbytes,
2561 				 __qdf_dma_dir_to_os(dir));
2562 		return;
2563 	}
2564 }
2565 #endif
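
/*
 * Illustrative sketch of the single-buffer map/unmap lifecycle; "osdev"
 * and the use of skb->len as the mapped length are hypothetical:
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	... program QDF_NBUF_CB_PADDR(skb) into the DMA engine ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */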
2566 
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
2597 
2598 /**
2599  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2601  *
2602  * Return: void
2603  */
2604 static inline
2605 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2606 {
2607 	spin_lock_bh(&skb_queue_head->lock);
2608 }
2609 
2610 /**
2611  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2613  *
2614  * Return: void
2615  */
2616 static inline
2617 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2618 {
2619 	spin_unlock_bh(&skb_queue_head->lock);
2620 }
2621 
2622 /**
2623  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2624  * @nbuf: qdf_nbuf_t
2625  * @idx: Index for which frag size is requested
2626  *
2627  * Return: Frag size
2628  */
2629 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2630 							   uint8_t idx)
2631 {
2632 	unsigned int size = 0;
2633 
2634 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2635 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2636 	return size;
2637 }
2638 
2639 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
2645  */
2646 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2647 						    uint8_t idx)
2648 {
2649 	__qdf_frag_t frag_addr = NULL;
2650 
2651 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2652 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2653 	return frag_addr;
2654 }
2655 
2656 /**
2657  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2658  * @nbuf: qdf_nbuf_t
2659  * @idx: Frag index
2660  * @size: Size by which frag_size needs to be increased/decreased
2661  *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2664 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2665 						 int size,
2666 						 unsigned int truesize)
2667 {
2668 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2669 }
2670 
2671 /**
2672  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2673  *          and adjust length by size.
2674  * @nbuf: qdf_nbuf_t
2675  * @idx: Frag index
2676  * @offset: Frag page offset should be moved by offset.
2677  *      +Ve - Move offset forward.
2678  *      -Ve - Move offset backward.
2679  *
2680  * Return: QDF_STATUS
2681  */
2682 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2683 					    int offset);
2684 
2685 /**
2686  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
2687  * @nbuf: nbuf pointer
2688  * @idx: frag idx need to be removed
2689  * @truesize: truesize of frag
2690  *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

2694 /**
2695  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
2696  * @buf: Frag pointer needs to be added in nbuf frag
2697  * @nbuf: qdf_nbuf_t where frag will be added
2698  * @offset: Offset in frag to be added to nbuf_frags
2699  * @frag_len: Frag length
2700  * @truesize: truesize
2701  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per the below condition:
2703  *      1. False: If this frag is being added in any nbuf
2704  *              for the first time after allocation.
2705  *      2. True: If frag is already attached part of any
2706  *              nbuf.
2707  *
2708  * It takes ref_count based on boolean flag take_frag_ref
2709  */
2710 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2711 			    int offset, int frag_len,
2712 			    unsigned int truesize, bool take_frag_ref);
2713 
2714 /**
2715  * __qdf_nbuf_ref_frag() - get frag reference
2716  *
2717  * Return: void
2718  */
2719 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2720 
2721 /**
2722  * __qdf_nbuf_set_mark() - Set nbuf mark
2723  * @buf: Pointer to nbuf
2724  * @mark: Value to set mark
2725  *
2726  * Return: None
2727  */
2728 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2729 {
2730 	buf->mark = mark;
2731 }
2732 
2733 /**
2734  * __qdf_nbuf_get_mark() - Get nbuf mark
2735  * @buf: Pointer to nbuf
2736  *
2737  * Return: Value of mark
2738  */
2739 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2740 {
2741 	return buf->mark;
2742 }
2743 
2744 /**
2745  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2746  * the data pointer to the end pointer
2747  * @nbuf: qdf_nbuf_t
2748  *
2749  * Return: size of skb from data pointer to end pointer
2750  */
2751 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2752 {
2753 	return (skb_end_pointer(nbuf) - nbuf->data);
2754 }
2755 
2756 /**
2757  * __qdf_nbuf_set_data_len() - Return the data_len of the nbuf
2758  * @nbuf: qdf_nbuf_t
2759  *
2760  * Return: value of data_len
2761  */
2762 static inline
2763 qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
2764 {
2765 	return nbuf->data_len = len;
2766 }
2767 
2768 /**
2769  * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
2770  * @nbuf: qdf_nbuf_t
2771  *
2772  * Return: value of data_len
2773  */
2774 static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
2775 {
2776 	return nbuf->data_len;
2777 }
2778 
2779 /**
2780  * __qdf_nbuf_set_hash() - set the hash of the buf
2781  * @buf: Network buf instance
 * @len: hash value to be set
2783  *
2784  * Return: None
2785  */
2786 static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
2787 {
2788 	buf->hash = len;
2789 }
2790 
2791 /**
2792  * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
2793  * @buf: Network buf instance
 * @len: sw hash value to be set
2795  *
2796  * Return: None
2797  */
2798 static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
2799 {
2800 	buf->sw_hash = len;
2801 }
2802 
2803 /**
2804  * __qdf_nbuf_set_csum_start() - set the csum start of the buf
2805  * @buf: Network buf instance
 * @len: checksum start offset to be set
2807  *
2808  * Return: None
2809  */
2810 static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
2811 {
2812 	buf->csum_start = len;
2813 }
2814 
2815 /**
2816  * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
2817  * @buf: Network buf instance
 * @len: checksum offset to be set
2819  *
2820  * Return: None
2821  */
2822 static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
2823 {
2824 	buf->csum_offset = len;
2825 }
2826 
2827 /**
2828  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2829  * @skb: Pointer to network buffer
2830  *
2831  * Return: Return the number of gso segments
2832  */
2833 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2834 {
2835 	return skb_shinfo(skb)->gso_segs;
2836 }
2837 
2838 /**
2839  * __qdf_nbuf_set_gso_segs() - set the number of gso segments
2840  * @skb: Pointer to network buffer
2841  * @val: val to be set
2842  *
2843  * Return: None
2844  */
2845 static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
2846 {
2847 	skb_shinfo(skb)->gso_segs = val;
2848 }
2849 
2850 /**
2851  * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
2852  * @skb: Pointer to network buffer
2853  *
2854  * Return: None
2855  */
2856 static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
2857 {
2858 	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
2859 }
2860 
2861 /**
2862  * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
2863  * @skb: Pointer to network buffer
2864  *
2865  * Return: None
2866  */
2867 static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
2868 {
2869 	skb->ip_summed = CHECKSUM_PARTIAL;
2870 }
2871 
2872 /**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
2877  */
2878 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2879 {
2880 	return skb_shinfo(skb)->gso_size;
2881 }
2882 
2883 /**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso size to be set
 *
 * Return: None
2888  */
2889 static inline void
2890 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
2891 {
2892 	skb_shinfo(skb)->gso_size = val;
2893 }
2894 
2895 /**
2896  * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
2898  *
2899  * This function is called to free the skb on failure cases
2900  *
2901  * Return: None
2902  */
2903 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
2904 {
2905 	kfree_skb(skb);
2906 }
2907 
2908 /**
2909  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
2911  *
2912  * This function is called to free the skb on failure cases
2913  *
2914  * Return: None
2915  */
2916 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
2917 {
2918 	dev_kfree_skb(skb);
2919 }
2920 
2921 /**
2922  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
2924  *
2925  * Return: TRUE if skb pkt type is mcast
2926  *         FALSE if not
2927  */
2928 static inline
2929 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
2930 {
2931 	return skb->pkt_type == PACKET_MULTICAST;
2932 }
2933 
2934 /**
2935  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is bcast
2939  *         FALSE if not
2940  */
2941 static inline
2942 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
2943 {
2944 	return skb->pkt_type == PACKET_BROADCAST;
2945 }
2946 
2947 /**
 * __qdf_nbuf_set_dev() - set dev of network buffer
 * @skb: Pointer to network buffer
 * @dev: net_device to be set in the network buffer
2951  *
2952  * Return: void
2953  */
2954 static inline
2955 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
2956 {
2957 	skb->dev = dev;
2958 }
2959 
2960 /**
2961  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
 * @skb: Pointer to network buffer
2963  *
2964  * Return: dev mtu value in nbuf
2965  */
2966 static inline
2967 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
2968 {
2969 	return skb->dev->mtu;
2970 }
2971 
2972 /**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using eth_type_trans OS API
 * @skb: Pointer to network buffer
2975  *
2976  * Return: None
2977  */
2978 static inline
2979 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
2980 {
2981 	skb->protocol = eth_type_trans(skb, skb->dev);
2982 }
2983 
2984 /*
2985  * __qdf_nbuf_net_timedelta() - get time delta
2986  * @t: time as __qdf_ktime_t object
2987  *
2988  * Return: time delta as ktime_t object
2989  */
2990 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
2991 {
2992 	return net_timedelta(t);
2993 }
2994 
2995 #ifdef CONFIG_NBUF_AP_PLATFORM
2996 #include <i_qdf_nbuf_w.h>
2997 #else
2998 #include <i_qdf_nbuf_m.h>
2999 #endif
#endif /* _I_QDF_NBUF_H */
3001