/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf API's.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of the nbuf. Linux uses sk_buff to represent both the packet and its
 * data, so a single sk_buff serves as the nbuf as well.
 */
typedef struct sk_buff *__qdf_nbuf_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.reserved1: reserved
 * @rx.dev.priv_cb_w.reserved2: reserved
 *
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.map_index: map index
 * @rx.dev.priv_cb_m.reserved: reserved
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm: peer cached buffer
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: a TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 * @rx.reserved: reserved
 *
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of the extra tx fragment (efrag) pointed to below
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.flag_notify_comp: flag, notify on tx completion
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag, packet is private
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                       (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment (efrag)
 * @tx.paddr: physical/DMA address of the extra tx fragment (efrag)
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint32_t reserved1;
					uint32_t reserved2;
				} priv_cb_w;
				struct {
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					unsigned char *lro_ctx;
					uint32_t map_index;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4;
			uint32_t tcp_udp_chksum:16,
				tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				rsrvd:5;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint8_t ftype;
			uint8_t reserved;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					uint32_t data_attr;
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/**
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
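
/*
 * Example (illustrative sketch, not part of the driver API): since the
 * macros below expand to plain struct members, one macro serves as both
 * getter and setter:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;			(set: L-value)
 *	ctx_id = QDF_NBUF_CB_RX_CTX_ID(skb);		(get: R-value)
 */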

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
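
/*
 * Example (illustrative sketch): prepending an HTC/HTT descriptor as the
 * single extra tx fragment. The names htc_hdr, htc_hdr_paddr and
 * HTC_HDR_LEN are hypothetical, not driver API:
 *
 *	__qdf_nbuf_frag_push_head(skb, HTC_HDR_LEN,
 *				  htc_hdr, htc_hdr_paddr);
 *	num = __qdf_nbuf_get_num_frags(skb);	(now 2: efrag + skb data)
 */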

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))


#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buff
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

typedef enum {
	CB_FTYPE_INVALID = 0,
	CB_FTYPE_MCAST2UCAST = 1,
	CB_FTYPE_TSO = 2,
	CB_FTYPE_TSO_SG = 3,
	CB_FTYPE_SG = 4,
	CB_FTYPE_INTRABSS_FWD = 5,
	CB_FTYPE_RX_INFO = 6,
	CB_FTYPE_MESH_RX_INFO = 7,
	CB_FTYPE_MESH_TX_INFO = 8,
} CB_FTYPE;

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
			int align, int prio);
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
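
/*
 * Example (illustrative sketch): converting the result of a kernel call
 * into a QDF status; err and skb here are hypothetical locals:
 *
 *	int err = pskb_expand_head(skb, 0, 128, GFP_ATOMIC);
 *
 *	return __qdf_to_status(err);	(0 -> QDF_STATUS_SUCCESS,
 *					 -ENOMEM -> QDF_STATUS_E_NOMEM)
 */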

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are
 * extra frags, their length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenate two nbufs: the new buffer (src) is piggybacked into the
 * older one (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
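
/*
 * Example (illustrative sketch): copying src's data into dst and then
 * releasing src, since __qdf_nbuf_cat() does not free it:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */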

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
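
/*
 * Example (illustrative sketch): appending a 4-byte trailer; note that
 * the skb is freed internally on allocation failure, so the caller must
 * not touch it again when NULL is returned:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	(skb already freed)
 *	memcpy(tail, trailer, 4);		(trailer is hypothetical)
 */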

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether we are in
 * interrupt context (e.g. "in_irq() || irqs_disabled()") and use
 * GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the returned skb is
 * completely modifiable by the caller.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
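
/*
 * Example (illustrative sketch): choosing between the two. A clone
 * shares the data buffer (treat the payload as read-only); a copy owns
 * its data and may be modified freely:
 *
 *	struct sk_buff *ro = __qdf_nbuf_clone(skb);	(cheap, shared data)
 *	struct sk_buff *rw = __qdf_nbuf_copy(skb);	(private, writable)
 */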

#define __qdf_nbuf_reserve      skb_reserve


/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of skb->data itself
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to the data pointer
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
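
/*
 * Example (illustrative sketch): stitching two extension segments onto a
 * head segment, e.g. for a jumbo/AMSDU frame; seg1 and seg2 are
 * hypothetical nbufs already filled with data:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(head, seg1, seg1->len + seg2->len);
 *	(head->len now covers the linear part plus both extensions)
 */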

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception; the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy data from the skb to a flat buffer
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, else failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}
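
/*
 * Example (illustrative sketch): gating a TSO-specific tx path on the
 * GSO state of the skb; the called helpers are the real prototypes
 * declared above, but the surrounding flow is hypothetical:
 *
 *	if (__qdf_nbuf_is_tso(skb))
 *		num_seg = __qdf_nbuf_get_tso_num_seg(skb);
 *	else
 *		num_seg = 1;
 */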

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @nbuf:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in the HTT descriptor
 *			needs to be set in case of CE classification
 *			support; it is set by this macro
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), then it is
 * a length field and the frame is 802.3; else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
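
/*
 * Example (illustrative sketch): the macro writes its last three
 * arguments in place, so the caller passes plain locals; the initial
 * values shown are hypothetical defaults:
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */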

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
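
/*
 * Example (illustrative sketch): a typical lifetime of the lockless
 * queue; the caller is responsible for any locking, and skb1/skb2 are
 * hypothetical nbufs:
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	skb = __qdf_nbuf_queue_remove(&q);	(FIFO: returns skb1)
 *	__qdf_nbuf_queue_free(&q);		(frees whatever remains)
 */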

/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain; remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */

/* no-op on Linux */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping
 *        the skb shell intact. In case of failure the skb is released.
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping
 *        the skb shell intact. In case of failure it releases the skb.
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it so the contents
 * can be accessed as a single flat buffer.
 *
 * Return: 0 on success, -ENOMEM on failure
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents
 * can be safely modified without affecting other
 * users. If the nbuf is a clone then this function
 * creates a new copy of the data. If the buffer is not
 * a clone the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 *        release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}
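
/*
 * Example (illustrative sketch): making room for an encapsulation header
 * and a trailer in one reallocation; on failure the skb has already been
 * freed, so only the returned pointer may be used afterwards:
 *
 *	skb = __qdf_nbuf_expand(skb, 64, 8);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */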

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to the header offset
 * @where: pointer to the checksum location
 *
 * Not implemented on Linux; asserts if called.
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS),
 *  passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is IPv4 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is IPv6 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private
 *  space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 *
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 *
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 *
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 *
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 *
 * @skb: sk buff
 *
 * Return: time difference in micro seconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	return skb_orphan(skb);
}
#ifdef CONFIG_WIN
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif
#endif /* _I_QDF_NBUF_H */