1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_nbuf.h
21  * This file provides OS dependent nbuf APIs.
22  */
23 
24 #ifndef _I_QDF_NBUF_H
25 #define _I_QDF_NBUF_H
26 
27 #include <linux/skbuff.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/dma-mapping.h>
31 #include <asm/cacheflush.h>
32 #include <qdf_types.h>
33 #include <qdf_net_types.h>
34 #include <qdf_status.h>
35 #include <qdf_util.h>
36 #include <qdf_mem.h>
37 #include <linux/tcp.h>
39 
40 /*
41  * Use the Linux socket buffer (sk_buff) as the underlying implementation.
42  * Linux uses sk_buff to represent both the packet descriptor and its data,
43  * so the qdf nbuf type maps directly onto a struct sk_buff pointer.
44  */
45 typedef struct sk_buff *__qdf_nbuf_t;
46 
47 /**
48  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
49  *
50  * This is used for skb queue management via linux skb buff head APIs
51  */
52 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
53 
54 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
55 
56 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
57  * max tx fragments added by the driver
58  * The driver will always add one tx fragment (the tx descriptor)
59  */
60 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
61 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
62 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
63 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
64 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
65 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
66 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
67 
68 
69 /* mark the first packet after wow wakeup */
70 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
71 
72 /*
73  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
74  */
75 typedef union {
76 	uint64_t       u64;
77 	qdf_dma_addr_t dma_addr;
78 } qdf_paddr_t;
79 
80 /**
81  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
82  *                    - data passed between layers of the driver.
83  *
84  * Notes:
85  *   1. Hard limited to 48 bytes. Please count your bytes
86  *   2. The size of this structure has to be easily calculable and
87  *      consistent; do not use any conditional compile flags
88  *   3. Split into a common part followed by a tx/rx overlay
89  *   4. There is only one extra frag, which represents the HTC/HTT header
90  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
91  *      for the priv_cb_w since it must be at same offset for both
92  *      TX and RX union
93  *
94  * @paddr   : physical address retrieved by dma_map of nbuf->data
95  *
96  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
97  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
98  * @rx.dev.priv_cb_w.reserved1: reserved
99  * @rx.dev.priv_cb_w.reserved2: reserved
100  *
101  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
102  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
103  * @rx.dev.priv_cb_m.lro_ctx: LRO context
104  * @rx.dev.priv_cb_m.map_index:
105  * @rx.dev.priv_cb_m.peer_local_id: peer_local_id for RX pkt
106  *
107  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
108  * @rx.peer_cached_buf_frm: peer cached buffer
109  * @rx.tcp_proto: L4 protocol is TCP
110  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
111  * @rx.ipv6_proto: L3 protocol is IPV6
112  * @rx.ip_offset: offset to IP header
113  * @rx.tcp_offset: offset to TCP header
114  * @rx_ctx_id: Rx context id
115  * @flush_ind: flush indication
116  * @num_elements_in_list: number of elements in the nbuf list
117  *
118  * @rx.tcp_udp_chksum: L4 payload checksum
119  * @rx.tcp_win: TCP window size
120  *
121  * @rx.flow_id: 32bit flow id
122  *
123  * @rx.flag_chfrag_start: first MSDU in an AMSDU
124  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
125  * @rx.flag_chfrag_end: last MSDU in an AMSDU
126  * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring
127  * @rx.rsrvd: reserved
128  *
129  * @rx.trace: combined structure for DP and protocol trace
130  * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
131  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
132  * @rx.trace.dp_trace: flag (Datapath trace)
133  * @rx.trace.packet_track: RX_DATA packet
134  * @rx.trace.rsrvd: reserved
135  *
136  * @rx.ftype: mcast2ucast, TSO, SG, MESH
137  * @rx.reserved: reserved
138  *
139  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
140  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
141  *
142  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
143  *                 + (1) CE classification enablement bit
144  *                 + (2) packet type (802.3 or Ethernet type II)
145  *                 + (3) packet offset (usually length of HTC/HTT descr)
146  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
147  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
148  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
149  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
150  * @tx.dev.priv_cb_m.reserved: reserved
151  *
152  * @tx.ftype: mcast2ucast, TSO, SG, MESH
153  * @tx.vdev_id: vdev (for protocol trace)
154  * @tx.len: length of efrag pointed by the above pointers
155  *
156  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
157  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
158  * @tx.flags.bits.num: number of extra frags (0 or 1)
159  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
160  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
161  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
162  * @tx.flags.bits.flag_ext_header: flag, extension header present
163  * @tx.flags.bits.flag_notify_comp: flag, tx completion notification needed
164  * @tx.trace: combined structure for DP and protocol trace
165  * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
166  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
167  * @tx.trace.is_packet_priv: flag, packet is marked private
168  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
169  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
170  *                          + (MGMT_ACTION)] - 4 bits
171  * @tx.trace.dp_trace: flag (Datapath trace)
172  * @tx.trace.is_bcast: flag (Broadcast packet)
173  * @tx.trace.is_mcast: flag (Multicast packet)
174  * @tx.trace.packet_type: flag (Packet type)
175  * @tx.trace.htt2_frm: flag (high-latency path only)
176  * @tx.trace.print: enable packet logging
177  *
178  * @tx.vaddr: virtual address of the extra tx fragment
179  * @tx.paddr: physical/DMA address of the extra tx fragment
180  */
181 struct qdf_nbuf_cb {
182 	/* common */
183 	qdf_paddr_t paddr; /* of skb->data */
184 	/* valid only in one direction */
185 	union {
186 		/* Note: MAX: 40 bytes */
187 		struct {
188 			union {
189 				struct {
190 					void *ext_cb_ptr;
191 					void *fctx;
192 					uint32_t reserved1;
193 					uint32_t reserved2;
194 				} priv_cb_w;
195 				struct {
196 					uint32_t tcp_seq_num;
197 					uint32_t tcp_ack_num;
198 					unsigned char *lro_ctx;
199 					uint32_t map_index;
200 					uint32_t peer_local_id;
201 				} priv_cb_m;
202 			} dev;
203 			uint32_t lro_eligible:1,
204 				peer_cached_buf_frm:1,
205 				tcp_proto:1,
206 				tcp_pure_ack:1,
207 				ipv6_proto:1,
208 				ip_offset:7,
209 				tcp_offset:7,
210 				rx_ctx_id:4,
211 				flush_ind:1,
212 				num_elements_in_list:8;
213 			uint32_t tcp_udp_chksum:16,
214 				 tcp_win:16;
215 			uint32_t flow_id;
216 			uint8_t flag_chfrag_start:1,
217 				flag_chfrag_cont:1,
218 				flag_chfrag_end:1,
219 				packet_buff_pool:1,
220 				rsrvd:4;
221 			union {
222 				uint8_t packet_state;
223 				uint8_t dp_trace:1,
224 					packet_track:4,
225 					rsrvd:3;
226 			} trace;
227 			uint8_t ftype;
228 			uint8_t reserved;
229 		} rx;
230 
231 		/* Note: MAX: 40 bytes */
232 		struct {
233 			union {
234 				struct {
235 					void *ext_cb_ptr;
236 					void *fctx;
237 				} priv_cb_w;
238 				struct {
239 					uint32_t data_attr;
240 					struct {
241 						uint32_t owned:1,
242 							priv:31;
243 					} ipa;
244 					uint16_t desc_id;
245 					uint16_t mgmt_desc_id;
246 					uint32_t reserved;
247 				} priv_cb_m;
248 			} dev;
249 			uint8_t ftype;
250 			uint8_t vdev_id;
251 			uint16_t len;
252 			union {
253 				struct {
254 					uint8_t flag_efrag:1,
255 						flag_nbuf:1,
256 						num:1,
257 						flag_chfrag_start:1,
258 						flag_chfrag_cont:1,
259 						flag_chfrag_end:1,
260 						flag_ext_header:1,
261 						flag_notify_comp:1;
262 				} bits;
263 				uint8_t u8;
264 			} flags;
265 			struct {
266 				uint8_t packet_state:7,
267 					is_packet_priv:1;
268 				uint8_t packet_track:4,
269 					proto_type:4;
270 				uint8_t dp_trace:1,
271 					is_bcast:1,
272 					is_mcast:1,
273 					packet_type:3,
274 					/* used only for hl*/
275 					htt2_frm:1,
276 					print:1;
277 			} trace;
278 			unsigned char *vaddr;
279 			qdf_paddr_t paddr;
280 		} tx;
281 	} u;
282 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
283 
284 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
285 	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));
286 
287 /**
288  *  access macros to qdf_nbuf_cb
289  *  Note: These macros can be used as L-values as well as R-values.
290  *        When used as R-values, they effectively function as "get" macros
291  *        When used as L-values, they effectively function as "set" macros
292  */
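
/*
 * Example (illustrative sketch): since these accessors expand to plain
 * struct member references, the same macro serves as both getter and
 * setter:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;			(set, L-value)
 *	uint8_t rx_ctx = QDF_NBUF_CB_RX_CTX_ID(skb);	(get, R-value)
 */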
293 
294 #define QDF_NBUF_CB_PADDR(skb) \
295 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
296 
297 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
298 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
299 #define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
300 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
301 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
302 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
303 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
304 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
305 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
306 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
307 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
308 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
309 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
310 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
311 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
312 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
313 #define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
314 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flush_ind)
315 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
316 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
317 
318 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
319 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
320 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
321 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
322 
323 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
324 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
325 
326 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
327 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
328 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
329 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
330 
331 #define QDF_NBUF_CB_RX_FTYPE(skb) \
332 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
333 
334 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
335 	(((struct qdf_nbuf_cb *) \
336 	((skb)->cb))->u.rx.flag_chfrag_start)
337 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
338 	(((struct qdf_nbuf_cb *) \
339 	((skb)->cb))->u.rx.flag_chfrag_cont)
340 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
341 		(((struct qdf_nbuf_cb *) \
342 		((skb)->cb))->u.rx.flag_chfrag_end)
343 #define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
344 		(((struct qdf_nbuf_cb *) \
345 		((skb)->cb))->u.rx.packet_buff_pool)
346 
347 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
348 	qdf_nbuf_set_state(skb, PACKET_STATE)
349 
350 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
351 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
352 
353 #define QDF_NBUF_CB_TX_FTYPE(skb) \
354 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
355 
356 
357 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
358 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
359 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
360 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
361 
362 /* Tx Flags Accessor Macros*/
363 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
364 	(((struct qdf_nbuf_cb *) \
365 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
366 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
367 	(((struct qdf_nbuf_cb *) \
368 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
369 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
370 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
371 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
372 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
373 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
374 	(((struct qdf_nbuf_cb *) \
375 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
376 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
377 	(((struct qdf_nbuf_cb *) \
378 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
379 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
380 		(((struct qdf_nbuf_cb *) \
381 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
382 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
383 		(((struct qdf_nbuf_cb *) \
384 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
385 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
386 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
387 /* End of Tx Flags Accessor Macros */
388 
389 /* Tx trace accessor macros */
390 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
391 	(((struct qdf_nbuf_cb *) \
392 		((skb)->cb))->u.tx.trace.packet_state)
393 
394 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
395 	(((struct qdf_nbuf_cb *) \
396 		((skb)->cb))->u.tx.trace.is_packet_priv)
397 
398 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
399 	(((struct qdf_nbuf_cb *) \
400 		((skb)->cb))->u.tx.trace.packet_track)
401 
402 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
403 		(((struct qdf_nbuf_cb *) \
404 			((skb)->cb))->u.rx.trace.packet_track)
405 
406 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
407 	(((struct qdf_nbuf_cb *) \
408 		((skb)->cb))->u.tx.trace.proto_type)
409 
410 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
411 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
412 
413 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
414 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
415 
416 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
417 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
418 
419 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
420 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
421 
422 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
423 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
424 
425 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
426 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
427 
428 #define QDF_NBUF_CB_SET_BCAST(skb) \
429 	(((struct qdf_nbuf_cb *) \
430 		((skb)->cb))->u.tx.trace.is_bcast = true)
431 
432 #define QDF_NBUF_CB_SET_MCAST(skb) \
433 	(((struct qdf_nbuf_cb *) \
434 		((skb)->cb))->u.tx.trace.is_mcast = true)
435 /* End of Tx trace accessor macros */
436 
437 
438 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
439 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
440 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
441 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
442 
443 /* assume the OS provides a single fragment */
444 #define __qdf_nbuf_get_num_frags(skb)		   \
445 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
446 
447 #define __qdf_nbuf_reset_num_frags(skb) \
448 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
449 
450 /**
451  *   end of nbuf->cb access macros
452  */
453 
454 typedef void (*qdf_nbuf_trace_update_t)(char *);
455 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
456 
457 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
458 
459 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
460 	(QDF_NBUF_CB_PADDR(skb) = paddr)
461 
462 #define __qdf_nbuf_frag_push_head(					\
463 	skb, frag_len, frag_vaddr, frag_paddr)				\
464 	do {					\
465 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
466 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
467 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
468 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
469 	} while (0)
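
/*
 * Illustrative sketch of pushing the single driver-owned extra fragment
 * (typically the HTC/HTT descriptor, see note 4 above); htt_hdr,
 * htt_hdr_len and htt_hdr_paddr are hypothetical names:
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_hdr_len, htt_hdr, htt_hdr_paddr);
 *	num = __qdf_nbuf_get_num_frags(skb);	(now 2: extra frag + skb data)
 */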
470 
471 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
472 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
473 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
474 
475 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
476 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
477 
478 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
479 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
480 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
481 	 /* assume that the OS only provides a single fragment */	\
482 	 QDF_NBUF_CB_PADDR(skb))
483 
484 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
485 
486 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
487 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
488 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
489 
490 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
491 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
492 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
493 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
494 
495 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
496 	do {								\
497 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
498 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
499 		if (frag_num)						\
500 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
501 							      is_wstrm; \
502 		else					\
503 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
504 							      is_wstrm; \
505 	} while (0)
506 
507 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
508 	do { \
509 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
510 	} while (0)
511 
512 #define __qdf_nbuf_get_vdev_ctx(skb) \
513 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
514 
515 #define __qdf_nbuf_set_tx_ftype(skb, type) \
516 	do { \
517 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
518 	} while (0)
519 
520 #define __qdf_nbuf_get_tx_ftype(skb) \
521 		 QDF_NBUF_CB_TX_FTYPE((skb))
522 
523 
524 #define __qdf_nbuf_set_rx_ftype(skb, type) \
525 	do { \
526 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
527 	} while (0)
528 
529 #define __qdf_nbuf_get_rx_ftype(skb) \
530 		 QDF_NBUF_CB_RX_FTYPE((skb))
531 
532 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
533 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
534 
535 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
536 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
537 
538 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
539 	do { \
540 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
541 	} while (0)
542 
543 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
544 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
545 
546 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
547 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
548 
549 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
550 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
551 
552 
553 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
554 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
555 
556 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
557 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
558 
559 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
560 	do { \
561 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
562 	} while (0)
563 
564 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
565 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
566 
567 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
568 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
569 
570 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
571 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
572 
573 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
574 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
575 
576 #define __qdf_nbuf_trace_get_proto_type(skb) \
577 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
578 
579 #define __qdf_nbuf_data_attr_get(skb)		\
580 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
581 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
582 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
583 
584 /**
585  * __qdf_nbuf_num_frags_init() - init extra frags
586  * @skb: sk buffer
587  *
588  * Return: none
589  */
590 static inline
591 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
592 {
593 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
594 }
595 
596 /*
597  * prototypes. Implemented in qdf_nbuf.c
598  */
599 
600 /**
601  * __qdf_nbuf_alloc() - Allocate nbuf
602  * @osdev: Device handle
603  * @size: Netbuf requested size
604  * @reserve: headroom to start with
605  * @align: Align
606  * @prio: Priority
607  * @func: Function name of the call site
608  * @line: line number of the call site
609  *
610  * This allocates an nbuf, aligns it if needed, and reserves some space
611  * in the front. Since the reserve is done after alignment, an unaligned
612  * reserve value will result in an unaligned data address.
613  *
614  * Return: nbuf or %NULL if no memory
615  */
616 __qdf_nbuf_t
617 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
618 		 int prio, const char *func, uint32_t line);
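
/*
 * Illustrative usage sketch (osdev is assumed to be a valid device handle
 * already obtained by the caller):
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */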
619 
620 void __qdf_nbuf_free(struct sk_buff *skb);
621 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
622 			struct sk_buff *skb, qdf_dma_dir_t dir);
623 void __qdf_nbuf_unmap(__qdf_device_t osdev,
624 			struct sk_buff *skb, qdf_dma_dir_t dir);
625 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
626 				 struct sk_buff *skb, qdf_dma_dir_t dir);
627 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
628 			struct sk_buff *skb, qdf_dma_dir_t dir);
629 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
630 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
631 
632 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
633 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
634 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
635 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
636 	qdf_dma_dir_t dir, int nbytes);
637 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
638 	qdf_dma_dir_t dir, int nbytes);
639 
640 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
641 	qdf_dma_dir_t dir);
642 
643 QDF_STATUS __qdf_nbuf_map_nbytes_single(
644 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
645 void __qdf_nbuf_unmap_nbytes_single(
646 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
647 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
648 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
649 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
650 QDF_STATUS __qdf_nbuf_frag_map(
651 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
652 	int offset, qdf_dma_dir_t dir, int cur_frag);
653 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
654 
655 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
656 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
657 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
658 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
659 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
660 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
661 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
662 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
663 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
664 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
665 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
666 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
667 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
668 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
669 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
670 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
671 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
672 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
673 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
674 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
675 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
676 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
677 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
678 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
679 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
680 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
681 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
682 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
683 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
684 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
685 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
686 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
687 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
688 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
689 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
690 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
691 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
692 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
693 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
694 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
695 
696 #ifdef QDF_NBUF_GLOBAL_COUNT
697 int __qdf_nbuf_count_get(void);
698 void __qdf_nbuf_count_inc(struct sk_buff *skb);
699 void __qdf_nbuf_count_dec(struct sk_buff *skb);
700 void __qdf_nbuf_mod_init(void);
701 void __qdf_nbuf_mod_exit(void);
702 
703 #else
704 
705 static inline int __qdf_nbuf_count_get(void)
706 {
707 	return 0;
708 }
709 
710 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
711 {
712 	return;
713 }
714 
715 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
716 {
717 	return;
718 }
719 
720 static inline void __qdf_nbuf_mod_init(void)
721 {
722 	return;
723 }
724 
725 static inline void __qdf_nbuf_mod_exit(void)
726 {
727 	return;
728 }
729 #endif
730 
731 /**
732  * __qdf_to_status() - OS to QDF status conversion
733  * @error : OS error
734  *
735  * Return: QDF status
736  */
737 static inline QDF_STATUS __qdf_to_status(signed int error)
738 {
739 	switch (error) {
740 	case 0:
741 		return QDF_STATUS_SUCCESS;
742 	case ENOMEM:
743 	case -ENOMEM:
744 		return QDF_STATUS_E_NOMEM;
745 	default:
746 		return QDF_STATUS_E_NOSUPPORT;
747 	}
748 }
749 
750 /**
751  * __qdf_nbuf_len() - return the amount of valid data in the skb
752  * @skb: Pointer to network buffer
753  *
754  * This API returns the amount of valid data in the skb. If a driver-added
755  * extra fragment is present, its length is included in the total.
756  *
757  * Return: network buffer length
758  */
759 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
760 {
761 	int i, extra_frag_len = 0;
762 
763 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
764 	if (i > 0)
765 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
766 
767 	return extra_frag_len + skb->len;
768 }
769 
770 /**
771  * __qdf_nbuf_cat() - link two nbufs
772  * @dst: Buffer to piggyback into
773  * @src: Buffer to put
774  *
775  * Concat two nbufs: the new buf (src) is piggybacked into the older one
776  * (dst) by copying. It is the caller's responsibility to free the src skb.
777  *
778  * Return: QDF_STATUS of the call; on failure dst is left unchanged and
779  *         the caller still owns src
780  */
781 static inline QDF_STATUS
782 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
783 {
784 	int error = 0;
785 
786 	qdf_assert(dst && src);
787 
788 	/*
789 	 * Since pskb_expand_head unconditionally reallocates the skb->head
790 	 * buffer, first check whether the current buffer is already large
791 	 * enough.
792 	 */
793 	if (skb_tailroom(dst) < src->len) {
794 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
795 		if (error)
796 			return __qdf_to_status(error);
797 	}
798 
799 	memcpy(skb_tail_pointer(dst), src->data, src->len);
800 	skb_put(dst, src->len);
801 	return __qdf_to_status(error);
802 }
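
/*
 * Illustrative usage sketch: after a successful cat the caller still owns
 * src and must free it:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */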
803 
804 /*
805  * nbuf manipulation routines
806  */
807 /**
808  * __qdf_nbuf_headroom() - return the amount of head space available
809  * @skb: Pointer to network buffer
810  *
811  * Return: amount of head room
812  */
813 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
814 {
815 	return skb_headroom(skb);
816 }
817 
818 /**
819  * __qdf_nbuf_tailroom() - return the amount of tail space available
820  * @skb: Pointer to network buffer
821  *
822  * Return: amount of tail room
823  */
824 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
825 {
826 	return skb_tailroom(skb);
827 }
828 
829 /**
830  * __qdf_nbuf_put_tail() - Puts data in the end
831  * @skb: Pointer to network buffer
832  * @size: size to be pushed
833  *
834  * Return: data pointer of this buf where new data has to be
835  *         put, or NULL if there is not enough room in this buf.
836  */
837 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
838 {
839 	if (skb_tailroom(skb) < size) {
840 		if (unlikely(pskb_expand_head(skb, 0,
841 			size - skb_tailroom(skb), GFP_ATOMIC))) {
842 			dev_kfree_skb_any(skb);
843 			return NULL;
844 		}
845 	}
846 	return skb_put(skb, size);
847 }
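
/*
 * Illustrative sketch (trailer and trailer_len are hypothetical): reserve
 * room at the tail and copy data in; note the skb is freed internally on
 * failure:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, trailer_len);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	(skb has already been freed)
 *	qdf_mem_copy(tail, trailer, trailer_len);
 */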
848 
849 /**
850  * __qdf_nbuf_trim_tail() - trim data out from the end
851  * @skb: Pointer to network buffer
852  * @size: size to be popped
853  *
854  * Return: none
855  */
856 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
857 {
858 	return skb_trim(skb, skb->len - size);
859 }
860 
861 
862 /*
863  * prototypes. Implemented in qdf_nbuf.c
864  */
865 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
866 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
867 				qdf_nbuf_rx_cksum_t *cksum);
868 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
869 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
870 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
871 void __qdf_nbuf_ref(struct sk_buff *skb);
872 int __qdf_nbuf_shared(struct sk_buff *skb);
873 
874 /*
875  * qdf_nbuf_pool_delete() implementation - do nothing in linux
876  */
877 #define __qdf_nbuf_pool_delete(osdev)
878 
879 /**
880  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
881  * @skb: Pointer to network buffer
882  *
883  * If GFP_ATOMIC is overkill, the caller can check whether it is running
884  * in interrupt context (for example "in_irq() || irqs_disabled()") and
885  * use GFP_KERNEL in the normal (process-context) case.
888  *
889  * Return: cloned skb
890  */
891 static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
892 {
893 	struct sk_buff *skb_new = NULL;
894 
895 	skb_new = skb_clone(skb, GFP_ATOMIC);
896 	if (skb_new)
897 		__qdf_nbuf_count_inc(skb_new);
898 
899 	return skb_new;
900 }
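
/*
 * Illustrative sketch of the allocation-context hint above (this file
 * itself always passes GFP_ATOMIC):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */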
901 
902 /**
903  * __qdf_nbuf_copy() - returns a private copy of the skb
904  * @skb: Pointer to network buffer
905  *
906  * This API returns a private copy of the skb; the returned skb is
907  * completely modifiable by the caller.
908  *
909  * Return: skb or NULL
910  */
911 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
912 {
913 	struct sk_buff *skb_new = NULL;
914 
915 	skb_new = skb_copy(skb, GFP_ATOMIC);
916 	if (skb_new)
917 		__qdf_nbuf_count_inc(skb_new);
918 
919 	return skb_new;
920 }
921 
922 #define __qdf_nbuf_reserve      skb_reserve
923 
924 /**
925  * __qdf_nbuf_reset() - reset the buffer data and pointer
926  * @skb: Network buf instance
927  * @reserve: headroom to reserve
928  * @align: alignment for the data pointer
929  *
930  * Return: none
931  */
932 static inline void
933 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
934 {
935 	int offset;
936 
937 	skb_push(skb, skb_headroom(skb));
938 	skb_put(skb, skb_tailroom(skb));
939 	memset(skb->data, 0x0, skb->len);
940 	skb_trim(skb, 0);
941 	skb_reserve(skb, NET_SKB_PAD);
942 	memset(skb->cb, 0x0, sizeof(skb->cb));
943 
944 	/*
945 	 * The default is for netbuf fragments to be interpreted
946 	 * as wordstreams rather than bytestreams.
947 	 */
948 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
949 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
950 
951 	/*
952 	 * Align & make sure that the tail & data are adjusted properly
953 	 */
954 
955 	if (align) {
956 		offset = ((unsigned long)skb->data) % align;
957 		if (offset)
958 			skb_reserve(skb, align - offset);
959 	}
960 
961 	skb_reserve(skb, reserve);
962 }
963 
964 /**
965  * __qdf_nbuf_head() - return the skb's head pointer
966  * @skb: Pointer to network buffer
967  *
968  * Return: Pointer to head buffer
969  */
970 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
971 {
972 	return skb->head;
973 }
974 
975 /**
976  * __qdf_nbuf_data() - return the pointer to data header in the skb
977  * @skb: Pointer to network buffer
978  *
979  * Return: Pointer to skb data
980  */
981 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
982 {
983 	return skb->data;
984 }
985 
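/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of the skb->data member itself
 */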
986 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
987 {
988 	return (uint8_t *)&skb->data;
989 }
990 
991 /**
992  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
993  * @skb: Pointer to network buffer
994  *
995  * Return: skb protocol
996  */
997 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
998 {
999 	return skb->protocol;
1000 }
1001 
1002 /**
1003  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1004  * @skb: Pointer to network buffer
1005  *
1006  * Return: skb ip_summed
1007  */
1008 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1009 {
1010 	return skb->ip_summed;
1011 }
1012 
1013 /**
1014  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1015  * @skb: Pointer to network buffer
1016  * @ip_summed: ip checksum
1017  *
1018  * Return: none
1019  */
1020 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1021 		 uint8_t ip_summed)
1022 {
1023 	skb->ip_summed = ip_summed;
1024 }
1025 
1026 /**
1027  * __qdf_nbuf_get_priority() - return the priority value of the skb
1028  * @skb: Pointer to network buffer
1029  *
1030  * Return: skb priority
1031  */
1032 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1033 {
1034 	return skb->priority;
1035 }
1036 
1037 /**
1038  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1039  * @skb: Pointer to network buffer
1040  * @p: priority
1041  *
1042  * Return: none
1043  */
1044 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1045 {
1046 	skb->priority = p;
1047 }
1048 
1049 /**
1050  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1051  * @skb: Current skb
1052  * @next_skb: Next skb
1053  *
1054  * Return: void
1055  */
1056 static inline void
1057 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1058 {
1059 	skb->next = skb_next;
1060 }
1061 
1062 /**
1063  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1064  * @skb: Current skb
1065  *
1066  * Return: the next skb pointed to by the current skb
1067  */
1068 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1069 {
1070 	return skb->next;
1071 }
1072 
1073 /**
1074  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1075  * @skb: Current skb
1076  * @next_skb: Next skb
1077  *
1078  * This fn is used to link up extensions to the head skb. Does not handle
1079  * linking to the head
1080  *
1081  * Return: none
1082  */
1083 static inline void
1084 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1085 {
1086 	skb->next = skb_next;
1087 }
1088 
1089 /**
1090  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1091  * @skb: Current skb
1092  *
1093  * Return: the next skb pointed to by the current skb
1094  */
1095 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1096 {
1097 	return skb->next;
1098 }
1099 
1100 /**
1101  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1102  * @skb_head: head_buf nbuf holding head segment (single)
1103  * @ext_list: nbuf list holding linked extensions to the head
1104  * @ext_len: Total length of all buffers in the extension list
1105  *
1106  * This function is used to link up a list of packet extensions
1107  * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
1108  *
1109  * Return: none
1110  */
1111 static inline void
1112 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1113 			struct sk_buff *ext_list, size_t ext_len)
1114 {
1115 	skb_shinfo(skb_head)->frag_list = ext_list;
1116 	skb_head->data_len = ext_len;
1117 	skb_head->len += skb_head->data_len;
1118 }
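
/*
 * Illustrative sketch (seg0/seg1/seg2 are hypothetical buffers): chain two
 * extension segments and hang them off the head segment:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 */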
1119 
1120 /**
1121  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1122  * @head_buf: Network buf holding head segment (single)
1123  *
1124  * This ext_list is populated for jumbo packets, for example during monitor
1125  * mode AMSDU reception, where the segments are stitched via frag_list.
1126  *
1127  * Return: Network buf list holding linked extensions from head buf.
1128  */
1129 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1130 {
1131 	return (skb_shinfo(head_buf)->frag_list);
1132 }
1133 
1134 /**
1135  * __qdf_nbuf_get_age() - return the checksum value of the skb
1136  * @skb: Pointer to network buffer
1137  *
1138  * Return: checksum value
1139  */
1140 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1141 {
1142 	return skb->csum;
1143 }
1144 
1145 /**
1146  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1147  * @skb: Pointer to network buffer
1148  * @v: Value
1149  *
1150  * Return: none
1151  */
1152 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1153 {
1154 	skb->csum = v;
1155 }
1156 
1157 /**
1158  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1159  * @skb: Pointer to network buffer
1160  * @adj: Adjustment value
1161  *
1162  * Return: none
1163  */
1164 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1165 {
1166 	skb->csum -= adj;
1167 }
1168 
1169 /**
1170  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1171  * @skb: Pointer to network buffer
1172  * @offset: Offset value
1173  * @len: Length
1174  * @to: Destination pointer
1175  *
1176  * Return: length of the copy bits for skb
1177  */
1178 static inline int32_t
1179 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1180 {
1181 	return skb_copy_bits(skb, offset, to, len);
1182 }
1183 
1184 /**
1185  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1186  * @skb: Pointer to network buffer
1187  * @len:  Packet length
1188  *
1189  * Return: none
1190  */
1191 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1192 {
1193 	if (skb->len > len) {
1194 		skb_trim(skb, len);
1195 	} else {
1196 		if (skb_tailroom(skb) < len - skb->len) {
1197 			if (unlikely(pskb_expand_head(skb, 0,
1198 				len - skb->len - skb_tailroom(skb),
1199 				GFP_ATOMIC))) {
1200 				dev_kfree_skb_any(skb);
1201 				qdf_assert(0);
1202 			}
1203 		}
1204 		skb_put(skb, (len - skb->len));
1205 	}
1206 }
1207 
1208 /**
1209  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1210  * @skb: Pointer to network buffer
1211  * @protocol: Protocol type
1212  *
1213  * Return: none
1214  */
1215 static inline void
1216 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1217 {
1218 	skb->protocol = protocol;
1219 }
1220 
1221 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1222 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1223 
1224 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1225 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1226 
1227 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1228 				      uint32_t *lo, uint32_t *hi);
1229 
1230 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1231 	struct qdf_tso_info_t *tso_info);
1232 
1233 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1234 			  struct qdf_tso_seg_elem_t *tso_seg,
1235 			  bool is_last_seg);
1236 
1237 #ifdef FEATURE_TSO
1238 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1239 
1240 #else
1241 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1242 {
1243 	return 0;
1244 }
1245 
1246 #endif /* FEATURE_TSO */
1247 
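/**
 * __qdf_nbuf_is_tso() - check whether the skb carries a TSO (GSO TCP) payload
 * @skb: Pointer to network buffer
 *
 * Return: true for TCPv4/TCPv6 GSO skbs, false otherwise
 */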
1248 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1249 {
1250 	if (skb_is_gso(skb) &&
1251 		(skb_is_gso_v6(skb) ||
1252 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1253 		return true;
1254 	else
1255 		return false;
1256 }
1257 
1258 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1259 
1260 int __qdf_nbuf_get_users(struct sk_buff *skb);
1261 
1262 /**
1263  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1264  *			      and get hw_classify by peeking
1265  *			      into packet
1266  * @nbuf:		Network buffer (skb on Linux)
1267  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1268  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1269  *			needs to be set in case of CE classification support
1270  *			Is set by this macro.
1271  * @hw_classify:	This is a flag which is set to indicate
1272  *			CE classification is enabled.
1273  *			Do not set this bit for VLAN packets
1274  *			OR for mcast / bcast frames.
1275  *
1276  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1277  * whether to enable tx_classify bit in CE.
1278  *
1279  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1280  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1281  * it is the length and a 802.3 frame else it is Ethernet Type II
1282  * (RFC 894).
1283  * Bit 4 in pkt_subtype is the tx_classify bit
1284  *
1285  * Return:	void
1286  */
1287 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1288 				pkt_subtype, hw_classify)	\
1289 do {								\
1290 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1291 	uint16_t ether_type = ntohs(eh->h_proto);		\
1292 	bool is_mc_bc;						\
1293 								\
1294 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1295 		   is_multicast_ether_addr((uint8_t *)eh);	\
1296 								\
1297 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1298 		hw_classify = 1;				\
1299 		pkt_subtype = 0x01 <<				\
1300 			HTT_TX_CLASSIFY_BIT_S;			\
1301 	}							\
1302 								\
1303 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1304 		pkt_type = htt_pkt_type_ethernet;		\
1305 								\
1306 } while (0)
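
/*
 * Illustrative usage sketch; the caller's translation unit is expected to
 * provide htt_pkt_type_ethernet and HTT_TX_CLASSIFY_BIT_S (HTT headers):
 *
 *	int pkt_type = 0, pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */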
1307 
1308 /**
1309  * nbuf private buffer routines
1310  */
1311 
1312 /**
1313  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1314  * @skb: Pointer to network buffer
1315  * @addr: Pointer to store header's addr
1316  * @len: Pointer to store the network buffer length
1317  *
1318  * Return: none
1319  */
1320 static inline void
1321 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1322 {
1323 	*addr = skb->data;
1324 	*len = skb->len;
1325 }
1326 
1327 /**
1328  * typedef __qdf_nbuf_queue_t - network buffer queue
1329  * @head: Head pointer
1330  * @tail: Tail pointer
1331  * @qlen: Queue length
1332  */
1333 typedef struct __qdf_nbuf_qhead {
1334 	struct sk_buff *head;
1335 	struct sk_buff *tail;
1336 	unsigned int qlen;
1337 } __qdf_nbuf_queue_t;
1338 
1339 /******************Functions *************/
1340 
1341 /**
1342  * __qdf_nbuf_queue_init() - initialize the queue head
1343  * @qhead: Queue head
1344  *
1345  * Return: QDF status
1346  */
1347 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1348 {
1349 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1350 	return QDF_STATUS_SUCCESS;
1351 }
1352 
1353 /**
1354  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1355  * @qhead: Queue head
1356  * @skb: Pointer to network buffer
1357  *
1358  * This is a lockless version, driver must acquire locks if it
1359  * needs to synchronize
1360  *
1361  * Return: none
1362  */
1363 static inline void
1364 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1365 {
1366 	skb->next = NULL;       /*Nullify the next ptr */
1367 
1368 	if (!qhead->head)
1369 		qhead->head = skb;
1370 	else
1371 		qhead->tail->next = skb;
1372 
1373 	qhead->tail = skb;
1374 	qhead->qlen++;
1375 }
1376 
1377 /**
1378  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1379  * @dest: target netbuf queue
1380  * @src:  source netbuf queue
1381  *
1382  * Return: target netbuf queue
1383  */
1384 static inline __qdf_nbuf_queue_t *
1385 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1386 {
1387 	if (!dest)
1388 		return NULL;
1389 	else if (!src || !(src->head))
1390 		return dest;
1391 
1392 	if (!(dest->head))
1393 		dest->head = src->head;
1394 	else
1395 		dest->tail->next = src->head;
1396 
1397 	dest->tail = src->tail;
1398 	dest->qlen += src->qlen;
1399 	return dest;
1400 }
1401 
1402 /**
1403  * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1404  * @qhead: Queue head
1405  * @skb: Pointer to network buffer
1406  *
1407  * This is a lockless version, driver must acquire locks if it needs to
1408  * synchronize
1409  *
1410  * Return: none
1411  */
1412 static inline void
1413 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1414 {
1415 	if (!qhead->head) {
1416 		/* empty queue: tail pointer must also be updated */
1417 		qhead->tail = skb;
1418 	}
1419 	skb->next = qhead->head;
1420 	qhead->head = skb;
1421 	qhead->qlen++;
1422 }
1423 
1424 /**
1425  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1426  * @qhead: Queue head
1427  *
1428  * This is a lockless version. Driver should take care of the locks
1429  *
1430  * Return: skb or NULL
1431  */
1432 static inline
1433 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1434 {
1435 	__qdf_nbuf_t tmp = NULL;
1436 
1437 	if (qhead->head) {
1438 		qhead->qlen--;
1439 		tmp = qhead->head;
1440 		if (qhead->head == qhead->tail) {
1441 			qhead->head = NULL;
1442 			qhead->tail = NULL;
1443 		} else {
1444 			qhead->head = tmp->next;
1445 		}
1446 		tmp->next = NULL;
1447 	}
1448 	return tmp;
1449 }
1450 
1451 /**
1452  * __qdf_nbuf_queue_free() - free a queue
1453  * @qhead: head of queue
1454  *
1455  * Return: QDF status
1456  */
1457 static inline QDF_STATUS
1458 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1459 {
1460 	__qdf_nbuf_t  buf = NULL;
1461 
1462 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1463 		__qdf_nbuf_free(buf);
1464 	return QDF_STATUS_SUCCESS;
1465 }
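
/*
 * Illustrative lifecycle sketch for this lockless queue (the caller
 * provides any locking; process() is a hypothetical consumer). Any
 * buffers still queued can instead be released in one shot with
 * __qdf_nbuf_queue_free():
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)) != NULL)
 *		process(skb);
 */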
1466 
1467 
1468 /**
1469  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1470  * @qhead: head of queue
1471  *
1472  * Return: first skb in the queue, or NULL if the queue is empty
1473  */
1474 static inline struct sk_buff *
1475 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1476 {
1477 	return qhead->head;
1478 }
1479 
1480 /**
1481  * __qdf_nbuf_queue_len() - return the queue length
1482  * @qhead: Queue head
1483  *
1484  * Return: Queue length
1485  */
1486 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1487 {
1488 	return qhead->qlen;
1489 }
1490 
1491 /**
1492  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1493  * @skb: Pointer to network buffer
1494  *
1495  * This API returns the next skb from packet chain, remember the skb is
1496  * still in the queue
1497  *
1498  * Return: next skb in the chain, or NULL if there are no more packets
1499  */
1500 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1501 {
1502 	return skb->next;
1503 }
1504 
1505 /**
1506  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1507  * @qhead: Queue head
1508  *
1509  * Return: true if length is 0 else false
1510  */
1511 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1512 {
1513 	return qhead->qlen == 0;
1514 }
1515 
1516 /*
1517  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1518  * Because the queue head will most likely be put in some structure,
1519  * we don't use pointer type as the definition.
1520  */
1527 
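/**
 * __qdf_nbuf_set_send_complete_flag() - no-op in the Linux implementation
 * @skb: Pointer to network buffer
 * @flag: send-complete flag (ignored)
 *
 * Return: none
 */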
1528 static inline void
1529 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1530 {
1531 }
1532 
1533 /**
1534  * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping the
1535  *        skb shell intact. In case of failure the skb is released.
1537  * @skb: sk buff
1538  * @headroom: size of headroom
1539  *
1540  * Return: skb or NULL
1541  */
1542 static inline struct sk_buff *
1543 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1544 {
1545 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1546 		dev_kfree_skb_any(skb);
1547 		skb = NULL;
1548 	}
1549 	return skb;
1550 }
1551 
1552 /**
1553  * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping the
1554  *        skb shell intact. In case of failure the skb is released.
1556  * @skb: sk buff
1557  * @tailroom: size of tailroom
1558  *
1559  * Return: skb or NULL
1560  */
1561 static inline struct sk_buff *
1562 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1563 {
1564 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1565 		return skb;
1566 	/* unlikely path */
1569 	dev_kfree_skb_any(skb);
1570 	return NULL;
1571 }
1572 
1573 /**
1574  * __qdf_nbuf_linearize() - skb linearize
1575  * @skb: sk buff
1576  *
1577  * If the nbuf is non-linear, this function linearizes it so that all of
1578  * the data sits in the skb's linear buffer.
1579  *
1580  * Return: 0 on success, -ENOMEM on failure
1584  */
1585 static inline int
1586 __qdf_nbuf_linearize(struct sk_buff *skb)
1587 {
1588 	return skb_linearize(skb);
1589 }
1590 
1591 /**
1592  * __qdf_nbuf_unshare() - skb unshare
1593  * @skb: sk buff
1594  *
1595  * Create a version of the specified nbuf whose contents can be safely
1596  * modified without affecting other users. If the nbuf is a clone then
1597  * this function creates a new copy of the data. If the buffer is not
1598  * a clone the original buffer is returned.
1600  *
1601  * Return: skb or NULL
1602  */
1603 static inline struct sk_buff *
1604 __qdf_nbuf_unshare(struct sk_buff *skb)
1605 {
1606 	return skb_unshare(skb, GFP_ATOMIC);
1607 }
1608 
1609 /**
1610  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1611  * @skb: sk buff
1612  *
1613  * Return: true/false
1614  */
1615 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1616 {
1617 	return skb_cloned(skb);
1618 }
1619 
1620 /**
1621  * __qdf_nbuf_pool_init() - init pool
1622  * @net: net handle
1623  *
1624  * Return: QDF status
1625  */
1626 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1627 {
1628 	return QDF_STATUS_SUCCESS;
1629 }
1630 
1636 /**
1637  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1638  *        release the skb.
1639  * @skb: sk buff
1640  * @headroom: size of headroom
1641  * @tailroom: size of tailroom
1642  *
1643  * Return: skb or NULL
1644  */
1645 static inline struct sk_buff *
1646 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1647 {
1648 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1649 		return skb;
1650 
1651 	dev_kfree_skb_any(skb);
1652 	return NULL;
1653 }
1654 
1655 /**
1656  * __qdf_nbuf_tx_cksum_info() - tx checksum info
1657  * @skb: Pointer to network buffer
1658  * @hdr_off: output pointer for the checksum header offset
1659  * @where: output pointer for the checksum location
1660  *
1661  * Not implemented for the Linux path; this always asserts.
1662  *
1663  * Return: true/false
1664  */
1660 static inline bool
1661 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1662 			 uint8_t **where)
1663 {
1664 	qdf_assert(0);
1665 	return false;
1666 }
1667 
1668 /**
1669  * __qdf_nbuf_reset_ctxt() - mem zero control block
1670  * @nbuf: buffer
1671  *
1672  * Return: none
1673  */
1674 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1675 {
1676 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1677 }
1678 
1679 /**
1680  * __qdf_nbuf_network_header() - get network header
1681  * @buf: buffer
1682  *
1683  * Return: network header pointer
1684  */
1685 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1686 {
1687 	return skb_network_header(buf);
1688 }
1689 
1690 /**
1691  * __qdf_nbuf_transport_header() - get transport header
1692  * @buf: buffer
1693  *
1694  * Return: transport header pointer
1695  */
1696 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1697 {
1698 	return skb_transport_header(buf);
1699 }
1700 
1701 /**
1702  * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
1703  * passed as part of the network buffer by the network stack
1704  * @skb: sk buff
1705  *
1706  * Return: TCP MSS size
1707  *
1708  */
1709 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1710 {
1711 	return skb_shinfo(skb)->gso_size;
1712 }
1713 
1714 /**
1715  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1716  * @nbuf: sk buff
1717  *
1718  * Return: none
1719  */
1720 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1721 
1722 /**
1723  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1724  * @nbuf: sk buff
1725  *
1726  * Return: void ptr
1727  */
1728 static inline void *
1729 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
1730 {
1731 	return (void *)nbuf->cb;
1732 }
1733 
1734 /**
1735  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
1736  * @skb: sk buff
1737  *
1738  * Return: head size
1739  */
1740 static inline size_t
1741 __qdf_nbuf_headlen(struct sk_buff *skb)
1742 {
1743 	return skb_headlen(skb);
1744 }
1745 
1746 /**
1747  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1748  * @skb: sk buff
1749  *
1750  * Return: number of fragments
1751  */
1752 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1753 {
1754 	return skb_shinfo(skb)->nr_frags;
1755 }
1756 
1757 /**
1758  * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
1759  * @skb: sk buff
1760  *
1761  * Return: true/false
1762  */
1763 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
1764 {
1765 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
1766 }
1767 
1768 /**
1769  * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
1770  * @skb: sk buff
1771  *
1772  * Return: true/false
1773  */
1774 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
1775 {
1776 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
1777 }
1778 
1779 /**
1780  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
1781  * @skb: sk buff
1782  *
1783  * Return: size of l2+l3+l4 header length
1784  */
1785 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
1786 {
1787 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
1788 }
1789 
1790 /**
1791  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
1792  * @buf: sk buff
1793  *
1794  * Return:  true/false
1795  */
1796 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
1797 {
1798 	if (skb_is_nonlinear(skb))
1799 		return true;
1800 	else
1801 		return false;
1802 }
1803 
1804 /**
1805  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
1806  * @skb: sk buff
1807  *
1808  * Return: TCP sequence number
1809  */
1810 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
1811 {
1812 	return ntohl(tcp_hdr(skb)->seq);
1813 }
1814 
1815 /**
1816  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
1817  * @skb: sk buff
1818  *
1819  * Return: data pointer to typecast into your priv structure
1820  */
1821 static inline uint8_t *
1822 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
1823 {
1824 	return &skb->cb[8];
1825 }
1826 
1827 /**
1828  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
1829  * @buf: Pointer to nbuf
1830  *
1831  * Return: None
1832  */
1833 static inline void
1834 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
1835 {
1836 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
1837 }
1838 
1839 /**
1840  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
1841  *
1842  * @buf: sk buff
1843  * @queue_id: Queue id
1844  *
1845  * Return: void
1846  */
1847 static inline void
1848 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
1849 {
1850 	skb_record_rx_queue(skb, queue_id);
1851 }
1852 
1853 /**
1854  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
1855  *
1856  * @buf: sk buff
1857  *
1858  * Return: Queue mapping
1859  */
1860 static inline uint16_t
1861 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
1862 {
1863 	return skb->queue_mapping;
1864 }
1865 
1866 /**
1867  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
1868  *
1869  * @buf: sk buff
1870  *
1871  * Return: void
1872  */
1873 static inline void
1874 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
1875 {
1876 	__net_timestamp(skb);
1877 }
1878 
1879 /**
1880  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
1881  *
1882  * @buf: sk buff
1883  *
1884  * Return: time difference in ms
1885  */
1886 static inline uint64_t
1887 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
1888 {
1889 	return ktime_to_ms(net_timedelta(skb->tstamp));
1890 }
1891 
1892 /**
1893  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
1894  *
1895  * @buf: sk buff
1896  *
1897  * Return: time difference in micro seconds
1898  */
1899 static inline uint64_t
1900 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
1901 {
1902 	return ktime_to_us(net_timedelta(skb->tstamp));
1903 }
1904 
1905 /**
1906  * __qdf_nbuf_orphan() - orphan a nbuf
1907  * @skb: sk buff
1908  *
1909  * If a buffer currently has an owner then we call the
1910  * owner's destructor function
1911  *
1912  * Return: void
1913  */
1914 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
1915 {
1916 	return skb_orphan(skb);
1917 }
1918 
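/*
 * The wrappers below operate on the native sk_buff_head queue
 * (__qdf_nbuf_queue_head_t); unlike __qdf_nbuf_queue_t above, the
 * underlying skb_queue_*() helpers take the queue's own spinlock
 * internally.
 */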
1919 static inline struct sk_buff *
1920 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
1921 {
1922 	return skb_dequeue(skb_queue_head);
1923 }
1924 
1925 static inline
1926 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
1927 {
1928 	return skb_queue_head->qlen;
1929 }
1930 
1931 static inline
1932 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
1933 					struct sk_buff *skb)
1934 {
1935 	return skb_queue_tail(skb_queue_head, skb);
1936 }
1937 
1938 static inline
1939 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
1940 {
1941 	return skb_queue_head_init(skb_queue_head);
1942 }
1943 
1944 static inline
1945 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
1946 {
1947 	return skb_queue_purge(skb_queue_head);
1948 }
1949 
1950 #ifdef CONFIG_WIN
1951 #include <i_qdf_nbuf_w.h>
1952 #else
1953 #include <i_qdf_nbuf_m.h>
1954 #endif
1955 #endif /* _I_QDF_NBUF_H */
1956