/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf API's.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying qdf_nbuf
 * implementation. Linux uses sk_buff to represent both the packet
 * descriptor and its data, so a single sk_buff pointer serves both
 * roles here.
 */
typedef struct sk_buff *__qdf_nbuf_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily and consistently
 *      calculable: do not use any conditional compilation flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.reserved1: reserved
 * @rx.dev.priv_cb_w.reserved2: reserved
 *
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.map_index: map index
 * @rx.dev.priv_cb_m.reserved: reserved
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm: peer cached buffer
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 * @rx.reserved: reserved
 *
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed to by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.flag_notify_comp: flag, tx completion notification required
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @tx.trace.is_packet_priv: flag, packet is private
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint32_t reserved1;
					uint32_t reserved2;
				} priv_cb_w;
				struct {
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					unsigned char *lro_ctx;
					uint32_t map_index;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4;
			uint32_t tcp_udp_chksum:16,
				tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				rsrvd:5;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint8_t ftype;
			uint8_t reserved;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					uint32_t data_attr;
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/**
 *  access macros to qdf_nbuf_cb
 *  Note: These macros can be used as L-values as well as R-values.
 *        When used as R-values, they effectively function as "get" macros
 *        When used as L-values, they effectively function as "set" macros
 */

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/**
 *   end of nbuf->cb access macros
 */
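
/*
 * Example: the cb accessors above work both as "get" (R-value) and
 * "set" (L-value) expressions. A minimal, hypothetical rx-path sketch;
 * rx_mark_tcp() and the literal offset are illustrative only, not part
 * of this header:
 *
 *	static void rx_mark_tcp(struct sk_buff *skb, uint8_t ctx)
 *	{
 *		QDF_NBUF_CB_RX_CTX_ID(skb) = ctx;	// set (L-value)
 *		if (QDF_NBUF_CB_RX_TCP_PROTO(skb))	// get (R-value)
 *			QDF_NBUF_CB_RX_TCP_OFFSET(skb) = 54;
 *	}
 */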

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)
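
/*
 * Example: attaching the single driver-added tx fragment (typically the
 * HTC/HTT descriptor) and reading it back. A hypothetical sketch;
 * desc_vaddr, desc_paddr and DESC_LEN are illustrative names:
 *
 *	__qdf_nbuf_frag_push_head(skb, DESC_LEN, desc_vaddr, desc_paddr);
 *	nfrags = __qdf_nbuf_get_num_frags(skb);     // 2: descriptor + skb data
 *	vaddr0 = __qdf_nbuf_get_frag_vaddr(skb, 0); // the pushed descriptor
 *	vaddr1 = __qdf_nbuf_get_frag_vaddr(skb, 1); // falls back to skb->data
 */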

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))


#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
			int align, int prio);
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
	return;
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
	return;
}

static inline void __qdf_nbuf_mod_init(void)
{
	return;
}

static inline void __qdf_nbuf_mod_exit(void)
{
	return;
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If a driver
 * extra frag is present, the returned length includes it.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the data of the new buf (src) is copied into the
 * older one (dst). It is the caller's responsibility to free the src
 * skb; this function does not release it, even on failure.
 *
 * Return: QDF_STATUS of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
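
/*
 * Example: concatenating two rx segments. A minimal sketch; ownership
 * of src stays with the caller, so it is freed here regardless of the
 * outcome (QDF_IS_STATUS_SUCCESS comes from qdf_status.h):
 *
 *	QDF_STATUS status = __qdf_nbuf_cat(dst, src);
 *
 *	__qdf_nbuf_free(src);
 *	if (!QDF_IS_STATUS_SUCCESS(status))
 *		goto drop;	// dst could not absorb the segment
 */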

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
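
/*
 * Example: appending a trailer with auto-expand. A sketch; note that on
 * failure the skb has already been freed internally, so it must not be
 * touched again (trailer is an illustrative local):
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, sizeof(trailer));
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb already released
 *	memcpy(tail, &trailer, sizeof(trailer));
 */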

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether the caller
 * is in interrupt context (e.g. "in_irq() || irqs_disabled()") and use
 * GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the returned skb is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
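
/*
 * Example: choosing between the two. A clone shares the (read-only)
 * data buffer with the original, while a copy is fully writable; an
 * illustrative sketch:
 *
 *	mon_skb = __qdf_nbuf_clone(skb); // cheap; do not modify its data
 *	mod_skb = __qdf_nbuf_copy(skb);  // private copy; headers rewritable
 */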

#define __qdf_nbuf_reserve      skb_reserve


/**
 * __qdf_nbuf_head() - return the pointer to the skb's head
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of the skb->data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of skb->data itself (not the data it points to)
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. It does not
 * handle linking to the head itself.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example
 * in case of monitor mode amsdu packet reception, where segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}
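
/*
 * Example: stitching a jumbo frame from three segments, with seg0 as
 * the head. A hypothetical monitor-mode A-MSDU sketch:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 *	ext = __qdf_nbuf_get_ext_list(seg0);	// returns seg1
 */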

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy data from the skb to a flat buffer
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjusts the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (sk_buff on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
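
/*
 * Example: classifying an outgoing frame. A sketch; pkt_type,
 * pkt_subtype and hw_classify are plain locals here, and
 * htt_pkt_type_eth2 is assumed to come from the HTT headers:
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify is now 1 for non-VLAN unicast frames, and pkt_type
 *	// was overridden when h_proto held a length field (802.3 frame)
 */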

/**
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t -  network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /*Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/*Empty queue Tail pointer Must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}


/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: first skb in the queue, or NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain; remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}
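
/*
 * Example: a produce/drain sketch. The queue APIs above do no internal
 * locking, so contended callers must wrap them in their own lock
 * (omitted here for brevity):
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *next;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);		// enqueue at the tail
 *	while (!__qdf_nbuf_is_queue_empty(&q)) {
 *		next = __qdf_nbuf_queue_remove(&q); // dequeue from the head
 *		__qdf_nbuf_free(next);
 *	}
 */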

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom in the data
 *        region while keeping the skb shell intact.
 *        In case of failure the skb is released.
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom in the data
 *        region while keeping the skb shell intact.
 *        In case of failure it releases the skb.
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it so that its
 * contents can be safely modified without affecting other users.
 *
 * Return: 0 on success, -ENOMEM if it is unable to linearize.
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a
 * clone, the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 *        release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}
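
/*
 * Example: guaranteeing room for an encapsulation header before
 * skb_push(). A sketch; HDR_LEN is illustrative, and on failure the
 * skb has already been released:
 *
 *	if (skb_headroom(skb) < HDR_LEN) {
 *		skb = __qdf_nbuf_realloc_headroom(skb, HDR_LEN);
 *		if (!skb)
 *			return QDF_STATUS_E_NOMEM;
 *	}
 *	skb_push(skb, HDR_LEN);
 */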

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to store the header offset
 * @where: pointer to store the checksum location
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS) passed
 *  as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 *
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is IPv4 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is IPv6 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 *
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 *
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 *
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 *
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 *
 * @skb: sk buff
 *
 * Return: time difference in micro seconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}
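
/*
 * Example: measuring how long a frame sat on the host. An illustrative
 * sketch; stamp at ingress, read the delta at egress:
 *
 *	__qdf_nbuf_set_timestamp(skb);			// on enqueue
 *	...
 *	delay_us = __qdf_nbuf_get_timedelta_us(skb);	// on dequeue
 */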

/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	return skb_orphan(skb);
}
#ifdef CONFIG_WIN
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif
#endif /* _I_QDF_NBUF_H */