/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf.h>

/*
 * Use the Linux socket buffer as the underlying implementation of the
 * nbuf. Linux uses sk_buff to represent both the packet descriptor and
 * its data, so a single sk_buff pointer serves as the nbuf type.
 */
typedef struct sk_buff *__qdf_nbuf_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64-bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx      : ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.reserved1 : reserved
 * @rx.dev.priv_cb_w.reserved2 : reserved
 *
 * @rx.dev.priv_cb_m.tcp_seq_num     : TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num     : TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx         : LRO context
 * @rx.dev.priv_cb_m.map_index       :
 * @rx.dev.priv_cb_m.reserved        : reserved
 *
 * @rx.lro_eligible        : flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm : peer cached buffer
 * @rx.tcp_proto           : L4 protocol is TCP
 * @rx.tcp_pure_ack        : A TCP ACK packet with no payload
 * @rx.ipv6_proto          : L3 protocol is IPV6
 * @rx.ip_offset           : offset to IP header
 * @rx.tcp_offset          : offset to TCP header
 * @rx.rx_ctx_id           : Rx context id
 *
 * @rx.tcp_udp_chksum  : L4 payload checksum
 * @rx.tcp_win         : TCP window size
 *
 * @rx.flow_id         : 32bit flow id
 *
 * @rx.flag_chfrag_start : first MSDU in an AMSDU
 * @rx.flag_chfrag_cont  : middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end   : last MSDU in an AMSDU
 * @rx.rsrvd             : reserved
 *
 * @rx.trace       : combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace       : flag (Datapath trace)
 * @rx.trace.rsrvd          : reserved
 *
 * @rx.ftype              : mcast2ucast, TSO, SG, MESH
 * @rx.reserved           : reserved
 *
 * @tx.dev.priv_cb_w.fctx       : ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr : extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr : value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned : packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv  : private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id   : tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id  : mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.reserved  : reserved
 *
 * @tx.ftype             : mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id           : vdev (for protocol trace)
 * @tx.len               : length of the efrag pointed to by tx.vaddr/tx.paddr
 *
 * @tx.flags.bits.flag_efrag  : flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.flag_nbuf   : flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.num         : number of extra frags (0 or 1)
 * @tx.flags.bits.flag_chfrag_start : first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont  : middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end   : last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header   : extended flags
 * @tx.flags.bits.reserved          : reserved
 * @tx.trace       : combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv :
 * @tx.trace.packet_track   : {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type     : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace       : flag (Datapath trace)
 * @tx.trace.is_bcast       : flag (Broadcast packet)
 * @tx.trace.is_mcast       : flag (Multicast packet)
 * @tx.trace.packet_type    : flag (Packet type)
 * @tx.trace.htt2_frm       : flag (high-latency path only)
 * @tx.trace.print          : enable packet logging
 *
 * @tx.vaddr             : virtual address of the extra tx fragment
 * @tx.paddr             : physical/DMA address of the extra tx fragment
 */

struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint32_t reserved1;
					uint32_t reserved2;
				} priv_cb_w;
				struct {
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					unsigned char *lro_ctx;
					uint32_t map_index;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4;
			uint32_t tcp_udp_chksum:16,
				tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				rsrvd:5;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					rsrvd:1;
			} trace;
			uint8_t ftype;
			uint8_t reserved;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					uint32_t data_attr;
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					uint32_t reserved;
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						reserved:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/**
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros
 *       When used as L-values, they effectively function as "set" macros
 */

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
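
/*
 * Example (illustrative sketch; the local names are hypothetical). Because
 * the accessors expand to struct members, the same macro works on both
 * sides of an assignment:
 *
 *	static void example_cb_access(struct sk_buff *skb)
 *	{
 *		qdf_dma_addr_t pa;
 *
 *		QDF_NBUF_CB_PADDR(skb) = 0;	// "set" usage (L-value)
 *		pa = QDF_NBUF_CB_PADDR(skb);	// "get" usage (R-value)
 *		(void)pa;
 *	}
 */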

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
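
/*
 * Example (sketch; names and values are hypothetical): attach a
 * driver-owned descriptor as the single extra fragment ahead of the
 * skb payload.
 *
 *	static void example_push_desc(struct sk_buff *skb, void *desc_va,
 *				      qdf_dma_addr_t desc_pa,
 *				      uint16_t desc_len)
 *	{
 *		__qdf_nbuf_frag_push_head(skb, desc_len, desc_va, desc_pa);
 *		// __qdf_nbuf_get_num_frags(skb) now reports 2: the extra
 *		// fragment plus the OS-provided skb data.
 *	}
 */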

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))


#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

typedef enum {
	CB_FTYPE_INVALID = 0,
	CB_FTYPE_MCAST2UCAST = 1,
	CB_FTYPE_TSO = 2,
	CB_FTYPE_TSO_SG = 3,
	CB_FTYPE_SG = 4,
	CB_FTYPE_INTRABSS_FWD = 5,
	CB_FTYPE_RX_INFO = 6,
	CB_FTYPE_MESH_RX_INFO = 7,
	CB_FTYPE_MESH_TX_INFO = 8,
} CB_FTYPE;

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
			int align, int prio);
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error : OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If a driver-added
 * extra fragment is present, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenates two nbufs: the contents of src are copied into dst.
 * It is the caller's responsibility to free the src skb.
 *
 * Return: QDF status of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
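
/*
 * Example (sketch): append src's bytes to dst, then release src, since
 * __qdf_nbuf_cat() only copies and the caller keeps ownership of src.
 *
 *	static QDF_STATUS example_cat(struct sk_buff *dst,
 *				      struct sk_buff *src)
 *	{
 *		QDF_STATUS status = __qdf_nbuf_cat(dst, src);
 *
 *		if (QDF_IS_STATUS_SUCCESS(status))
 *			__qdf_nbuf_free(src);
 *		return status;
 *	}
 */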

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf;
 *         on failure the skb is freed.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
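
/*
 * Example (sketch; the trailer is hypothetical): reserve room at the
 * tail and fill it. The skb must not be touched again when NULL is
 * returned, because __qdf_nbuf_put_tail() frees it on expansion failure.
 *
 *	static bool example_add_trailer(struct sk_buff *skb,
 *					const uint8_t *trailer, size_t len)
 *	{
 *		uint8_t *dst = __qdf_nbuf_put_tail(skb, len);
 *
 *		if (!dst)
 *			return false;	// skb already freed
 *		memcpy(dst, trailer, len);
 *		return true;
 *	}
 */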

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, the caller's context can be checked first:
 * allocate with GFP_ATOMIC only when called from interrupt context
 * (e.g. "in_irq() || irqs_disabled()"), and with GFP_KERNEL otherwise.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
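
/*
 * Sketch of the context-sensitive allocation suggested above (not part
 * of the QDF API): fall back to GFP_KERNEL from process context so the
 * allocator may sleep instead of failing under memory pressure.
 *
 *	static struct sk_buff *example_clone_ctx_aware(struct sk_buff *skb)
 *	{
 *		gfp_t gfp = (in_irq() || irqs_disabled()) ?
 *				GFP_ATOMIC : GFP_KERNEL;
 *
 *		return skb_clone(skb, gfp);
 *	}
 */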

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb, the skb returned is completely
 * modifiable by callers
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of the skb->data field itself
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This function is used to link up extensions to the head skb. Does not
 * handle linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, seg2,
 * ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
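
/*
 * Example (sketch): stitch two extension segments onto a head segment.
 * ext_len must be the total byte count across the extension list, since
 * it is accounted into skb_head->len via data_len.
 *
 *	static void example_append(struct sk_buff *head,
 *				   struct sk_buff *seg1,
 *				   struct sk_buff *seg2)
 *	{
 *		__qdf_nbuf_set_next_ext(seg1, seg2);
 *		__qdf_nbuf_set_next_ext(seg2, NULL);
 *		__qdf_nbuf_append_ext_list(head, seg1,
 *					   seg1->len + seg2->len);
 *	}
 */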

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor-mode AMSDU reception, where segments are stitched
 * together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

/**
 * __qdf_nbuf_is_tso() - check whether the network stack flagged the skb
 *	for GSO/TSO (TCPv4 or TCPv6)
 * @skb: Pointer to network buffer
 *
 * Return: true if the skb is a TSO frame
 */
static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}
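
/*
 * Example (sketch): gate the TSO path on the GSO state carried by the
 * network stack before doing any per-segment bookkeeping.
 *
 *	static uint32_t example_tso_segs(struct sk_buff *skb)
 *	{
 *		if (!__qdf_nbuf_is_tso(skb))
 *			return 0;
 *		return __qdf_nbuf_get_tso_num_seg(skb);
 *	}
 */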

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
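
/*
 * Example (sketch): typical invocation. htt_pkt_type_eth2,
 * htt_pkt_type_ethernet and HTT_TX_CLASSIFY_BIT_S come from the HTT
 * headers, which this file does not include.
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// pkt_type is overridden to htt_pkt_type_ethernet for 802.3
 *	// frames; hw_classify/pkt_subtype are set only for eligible
 *	// unicast, non-VLAN frames.
 */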

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions ******************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
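
/*
 * Example (sketch): build and drain a queue. The helpers take no locks;
 * callers serialize externally when needed.
 *
 *	static void example_queue_use(struct sk_buff *a, struct sk_buff *b)
 *	{
 *		__qdf_nbuf_queue_t q;
 *		struct sk_buff *skb;
 *
 *		__qdf_nbuf_queue_init(&q);
 *		__qdf_nbuf_queue_add(&q, a);		// at tail
 *		__qdf_nbuf_queue_insert_head(&q, b);	// at head
 *
 *		while ((skb = __qdf_nbuf_queue_remove(&q)))
 *			__qdf_nbuf_free(skb);
 *	}
 */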

/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain, remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use pointer type as the definition.
 */
1464 
/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag
 * @skb: Pointer to network buffer
 * @flag: flag value
 *
 * No-op on Linux.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the skb headroom while keeping
 *	the skb shell intact. In case of failure the skb is released.
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the skb tailroom while keeping
 *	the skb shell intact. In case of failure the skb is released.
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, linearize it so that its contents sit in a
 * single contiguous buffer that can be safely modified.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone then
 * this function creates a new copy of the data. If the buffer is not
 * a clone the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 *        release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: output pointer (unused)
 * @where: output pointer (unused)
 *
 * Not implemented on Linux; asserts when called.
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS),
 *	passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_set_rx_info() - set rx info
 * @nbuf: sk buffer
 * @info: rx info
 * @len: length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_rx_info(__qdf_nbuf_t nbuf, void *info, uint32_t len)
{
	/* Some platforms enlarge skb->cb (e.g. to 96 bytes); the space
	 * beyond struct qdf_nbuf_cb is then large enough to stash the
	 * rx status info struct.
	 */
	uint8_t offset = sizeof(struct qdf_nbuf_cb);
	uint32_t max = sizeof(((struct sk_buff *)0)->cb) - offset;

	len = (len > max ? max : len);

	memcpy(((uint8_t *)(nbuf->cb) + offset), info, len);
}

/**
 * __qdf_nbuf_get_rx_info() - get rx info
 * @nbuf: sk buffer
 *
 * Return: rx_info
 */
static inline void *
__qdf_nbuf_get_rx_info(__qdf_nbuf_t nbuf)
{
	uint8_t offset = sizeof(struct qdf_nbuf_cb);

	return (void *)((uint8_t *)(nbuf->cb) + offset);
}
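
/*
 * Example (sketch; struct example_rx_status is hypothetical and this
 * only works when the platform's skb->cb is larger than struct
 * qdf_nbuf_cb):
 *
 *	struct example_rx_status { uint32_t rssi; };
 *
 *	static void example_stash_rx_status(__qdf_nbuf_t nbuf, uint32_t rssi)
 *	{
 *		struct example_rx_status rs = { .rssi = rssi };
 *
 *		__qdf_nbuf_set_rx_info(nbuf, &rs, sizeof(rs));
 *	}
 *
 *	static uint32_t example_read_rx_status(__qdf_nbuf_t nbuf)
 *	{
 *		struct example_rx_status *rs = __qdf_nbuf_get_rx_info(nbuf);
 *
 *		return rs->rssi;
 *	}
 */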

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 header length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_invalidate_range() - invalidate virtual address range
 * @start: start address of the address range
 * @end: end address of the address range
 *
 * Note that this function does not write back the cache entries.
 *
 * Return: none
 */
#ifdef MSM_PLATFORM
static inline void __qdf_invalidate_range(void *start, void *end)
{
	dmac_inv_range(start, end);
}

#else
static inline void __qdf_invalidate_range(void *start, void *end)
{
	/* TODO figure out how to invalidate cache on x86 and other
	 * non-MSM platforms
	 */
	pr_err("Cache invalidate not yet implemented for non-MSM platforms\n");
}
#endif

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 * @skb: sk buff
 *
 * Return: time difference in micro seconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

#ifdef CONFIG_WIN
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif
#endif /* _I_QDF_NBUF_H */