/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of qdf_nbuf. Linux uses sk_buff to represent both the packet descriptor
 * and its data, so a single sk_buff pointer serves both roles here.
 */
typedef struct sk_buff *__qdf_nbuf_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
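
/*
 * Illustrative sketch (assumed usage, not part of this header): because
 * qdf_paddr_t unions the DMA address with a uint64_t, the field occupies
 * a full 64-bit-aligned slot even on builds where qdf_dma_addr_t is only
 * 32 bits, keeping the cb layout below identical across configurations.
 *
 *	qdf_paddr_t pa = { .u64 = 0 };
 *
 *	pa.dma_addr = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
 *	pr_debug("paddr as u64: %llx\n", pa.u64);
 */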

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx      : ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.reserved1 : reserved
 * @rx.dev.priv_cb_w.reserved2 : reserved
 *
 * @rx.dev.priv_cb_m.tcp_seq_num     : TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num     : TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx         : LRO context
 * @rx.dev.priv_cb_m.map_index       : map index
 * @rx.dev.priv_cb_m.reserved        : reserved
 *
 * @rx.lro_eligible        : flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm : peer cached buffer
 * @rx.tcp_proto           : L4 protocol is TCP
 * @rx.tcp_pure_ack        : A TCP ACK packet with no payload
 * @rx.ipv6_proto          : L3 protocol is IPV6
 * @rx.ip_offset           : offset to IP header
 * @rx.tcp_offset          : offset to TCP header
 * @rx_ctx_id              : Rx context id
 *
 * @rx.tcp_udp_chksum  : L4 payload checksum
 * @rx.tcp_win         : TCP window size
 *
 * @rx.flow_id         : 32bit flow id
 *
 * @rx.flag_chfrag_start : first MSDU in an AMSDU
 * @rx.flag_chfrag_cont  : middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end   : last MSDU in an AMSDU
 * @rx.rsrvd             : reserved
 *
 * @rx.trace       : combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @rx.trace.dp_trace       : flag (Datapath trace)
 * @rx.trace.rsrvd          : reserved
 *
 * @rx.ftype              : mcast2ucast, TSO, SG, MESH
 * @rx.reserved           : reserved
 *
 * @tx.dev.priv_cb_w.ext_cb_ptr : extended cb pointer
 * @tx.dev.priv_cb_w.fctx       : ctx to handle special pkts defined by ftype
 *
 * @tx.dev.priv_cb_m.data_attr : value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned : packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv  : private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id   : tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id  : mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.reserved  : reserved
 *
 * @tx.ftype             : mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id           : vdev (for protocol trace)
 * @tx.len               : length of efrag pointed to by the above pointers
 *
 * @tx.flags.bits.flag_efrag  : flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.flag_nbuf   : flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.num         : number of extra frags (0 or 1)
 * @tx.flags.bits.flag_chfrag_start : first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont  : middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end   : last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header   : extended flags
 * @tx.flags.bits.reserved          : reserved
 * @tx.trace       : combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @tx.trace.is_packet_priv : flag (packet is priv)
 * @tx.trace.packet_track   : {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type     : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace       : flag (Datapath trace)
 * @tx.trace.is_bcast       : flag (Broadcast packet)
 * @tx.trace.is_mcast       : flag (Multicast packet)
 * @tx.trace.packet_type    : flag (Packet type)
 * @tx.trace.htt2_frm       : flag (high-latency path only)
 * @tx.trace.print          : enable packet logging
 *
 * @tx.vaddr             : virtual address of the extra tx fragment
 * @tx.paddr             : physical/DMA address of the extra tx fragment
 */

struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint32_t reserved1;
					uint32_t reserved2;
				} priv_cb_w;
				struct {
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					unsigned char *lro_ctx;
					uint32_t map_index;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4;
			uint32_t tcp_udp_chksum:16,
				tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				rsrvd:5;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					rsrvd:1;
			} trace;
			uint8_t ftype;
			uint8_t reserved;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					uint32_t data_attr;
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						reserved:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/**
 *  access macros to qdf_nbuf_cb
 *  Note: These macros can be used as L-values as well as R-values.
 *        When used as R-values, they effectively function as "get" macros
 *        When used as L-values, they effectively function as "set" macros
 */

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
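
/*
 * Illustrative sketch (assumed usage): the same macro works on both sides
 * of an assignment because it expands to an l-value inside skb->cb.
 *
 *	QDF_NBUF_CB_PADDR(skb) = paddr;              // "set"
 *	qdf_dma_addr_t pa = QDF_NBUF_CB_PADDR(skb);  // "get"
 */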

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
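
/*
 * Illustrative sketch (assumed usage): a tx path that prepends one extra
 * fragment, typically the HTC/HTT descriptor, before handing the nbuf to
 * the target. "htt_desc", its length, and its DMA address are
 * placeholders.
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_desc_len,
 *				  (char *)htt_desc, htt_desc_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now reports 2: the extra frag
 *	// plus the single OS-provided fragment (skb->data).
 */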

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))


#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

typedef enum {
	CB_FTYPE_INVALID = 0,
	CB_FTYPE_MCAST2UCAST = 1,
	CB_FTYPE_TSO = 2,
	CB_FTYPE_TSO_SG = 3,
	CB_FTYPE_SG = 4,
	CB_FTYPE_INTRABSS_FWD = 5,
	CB_FTYPE_RX_INFO = 6,
	CB_FTYPE_MESH_RX_INFO = 7,
	CB_FTYPE_MESH_TX_INFO = 8,
} CB_FTYPE;

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
			int align, int prio);
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error : OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
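
/*
 * Illustrative sketch (assumed usage): mapping a Linux errno onto the QDF
 * status space. Anything other than success or ENOMEM collapses into
 * QDF_STATUS_E_NOSUPPORT.
 *
 *	int rc = pskb_expand_head(skb, 0, 128, GFP_ATOMIC);
 *	QDF_STATUS status = __qdf_to_status(rc);  // -ENOMEM -> E_NOMEM
 */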

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If a driver-added
 * extra fragment is present, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}
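
/*
 * Illustrative note: with a 14-byte extra fragment pushed via
 * __qdf_nbuf_frag_push_head() and skb->len == 1500, __qdf_nbuf_len()
 * reports 1514, whereas skb->len alone would under-count the frame.
 */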

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenate two nbufs: the contents of src are copied into the tail of
 * dst. It is the caller's responsibility to free the src skb.
 *
 * Return: QDF status of the call. On failure dst is left unmodified and
 *         the caller still owns src.
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
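
/*
 * Illustrative sketch (assumed usage): concatenating and then releasing
 * the source buffer, since __qdf_nbuf_cat() never frees src itself.
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */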

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
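
/*
 * Illustrative sketch (assumed usage): appending a trailer. The skb is
 * consumed (freed) on allocation failure, so the caller must not touch it
 * when NULL is returned.
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;  // skb already freed
 *	memset(tail, 0, 4);
 */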

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether the caller is
 * in interrupt context (e.g. "in_irq() || irqs_disabled()") and use
 * GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
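
/*
 * Illustrative note: __qdf_nbuf_clone() shares the underlying data buffer
 * and only duplicates the skb metadata, so the payload must be treated as
 * read-only; __qdf_nbuf_copy() duplicates the payload as well.
 *
 *	struct sk_buff *ro = __qdf_nbuf_clone(skb);  // cheap, shared data
 *	struct sk_buff *rw = __qdf_nbuf_copy(skb);   // private, writable
 */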

#define __qdf_nbuf_reserve      skb_reserve


/**
 * __qdf_nbuf_head() - return the pointer to the skb's head
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
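
/*
 * Illustrative sketch (assumed usage): stitching two extension segments
 * onto a head segment, e.g. when reassembling a jumbo/A-MSDU frame.
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 *	// seg0->len now covers the head data plus both extensions.
 */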

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor-mode A-MSDU reception, where the segments are
 * stitched together via the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the age value of the skb
 * @skb: Pointer to network buffer
 *
 * The age is stored in the skb's checksum field.
 *
 * Return: age value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the age value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy data out of the skb into a flat buffer
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a
 * length and the frame is 802.3; otherwise it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
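
/*
 * Illustrative sketch (assumed usage): "pkt_type", "pkt_subtype" and
 * "hw_classify" are plain locals that the macro fills in before the
 * caller builds the HTT tx descriptor.
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */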

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: the tail pointer must be updated as well */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
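
/*
 * Illustrative sketch (assumed usage): a typical lockless queue
 * lifecycle; the caller provides its own locking if the queue is shared.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	skb = __qdf_nbuf_queue_remove(&q);  // returns skb1 (FIFO order)
 *	__qdf_nbuf_queue_free(&q);          // frees whatever remains
 */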


/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain, remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type for the definition.
 */

static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping the
 *        skb shell intact. In case of failure the skb is released.
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping the
 *        skb shell intact. In case of failure the skb is released.
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, linearize it so its contents can be accessed
 * as a single flat buffer.
 *
 * Return: 0 on success, -ENOMEM if linearization fails
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a clone,
 * the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 *        release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}
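
/*
 * Illustrative sketch (assumed usage, "rtap_len" is a placeholder):
 * growing an under-provisioned rx buffer before prepending a radiotap
 * header. On failure the original skb has already been freed, so only
 * the returned pointer may be used.
 *
 *	skb = __qdf_nbuf_expand(skb, rtap_len, 0);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	skb_push(skb, rtap_len);
 */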

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to return the header offset
 * @where: pointer to return the checksum location
 *
 * Not supported on this OS layer; asserts when called.
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS),
 *  passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_set_rx_info() - set rx info
 * @nbuf: sk buffer
 * @info: rx info
 * @len: length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_rx_info(__qdf_nbuf_t nbuf, void *info, uint32_t len)
{
	/* A platform may have enlarged skb->cb (e.g. to 96 bytes), in which
	 * case the space past struct qdf_nbuf_cb can hold the rx status
	 * info struct; cap len to whatever actually fits.
	 */
	uint8_t offset = sizeof(struct qdf_nbuf_cb);
	uint32_t max = sizeof(((struct sk_buff *)0)->cb) - offset;

	len = (len > max ? max : len);

	memcpy(((uint8_t *)(nbuf->cb) + offset), info, len);
}
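
/*
 * Illustrative note: the rx info lives in the tail of skb->cb, right
 * after struct qdf_nbuf_cb. On a stock 48-byte cb that the control block
 * fills completely, "max" above is 0 and nothing is copied; the scheme
 * only carries data on builds with an enlarged skb->cb.
 */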

/**
 * __qdf_nbuf_get_rx_info() - get rx info
 * @nbuf: sk buffer
 *
 * Return: rx_info
 */
static inline void *
__qdf_nbuf_get_rx_info(__qdf_nbuf_t nbuf)
{
	uint8_t offset = sizeof(struct qdf_nbuf_cb);

	return (void *)((uint8_t *)(nbuf->cb) + offset);
}

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is IPv4 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is IPv6 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private
 *                             space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by the Linux
 *				    kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 * @skb: sk buff
 *
 * Return: time difference in micro seconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}
#ifdef CONFIG_WIN
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif
#endif /* _I_QDF_NBUF_H */