/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS-dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of the nbuf. Linux uses sk_buff to represent both the packet descriptor
 * and its data, so a single sk_buff serves both roles here.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
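
/*
 * Example (illustrative sketch, not part of the original header): because
 * qdf_paddr_t is unioned with a uint64_t, the compiler gives dma_addr
 * 64-bit alignment even when qdf_dma_addr_t is a 32-bit type on the
 * target. A hypothetical helper demonstrating the intent:
 *
 *	static inline void example_store_paddr(qdf_paddr_t *p,
 *					       qdf_dma_addr_t addr)
 *	{
 *		p->u64 = 0;		// clear both 32-bit halves
 *		p->dma_addr = addr;	// store the mapped address
 *	}
 */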

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.reserved1: reserved
 *
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index for RX packet
 * @rx.dev.priv_cb_m.peer_local_id: peer_local_id for RX pkt
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm: peer cached buffer
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.flush_ind: flush indication
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 * @rx.is_raw_frame: RAW frame
 * @rx.reserved: reserved
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of the efrag pointed to by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.flag_notify_comp: flag to notify on tx completion
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag, packet is private
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint32_t reserved1;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 reserved:15,
						 peer_local_id:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				flush_ind:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				packet_buff_pool:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint8_t ftype;
			uint8_t is_raw_frame:1,
				reserved:7;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t reserved[3];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/*
 * Access macros for qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros
 *       When used as L-values, they effectively function as "set" macros
 */
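
/*
 * Example (illustrative): the same macro works as both setter and getter.
 * Assuming a valid skb whose cb block has been zeroed:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 3;			// "set" (L-value)
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);	// "get" (R-value)
 */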

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flush_ind)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.packet_buff_pool)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
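
/*
 * Example (illustrative sketch): a driver pushing its HTT descriptor as
 * the single extra tx fragment. htt_desc, htt_desc_paddr and HTT_DESC_LEN
 * are hypothetical names, not part of this header:
 *
 *	__qdf_nbuf_frag_push_head(skb, HTT_DESC_LEN,
 *				  htt_desc, htt_desc_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now reports 2 (extra frag + skb)
 */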

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some headroom.
 * Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
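
/*
 * Example (illustrative): allocating a 2KB buffer with 4-byte alignment
 * and 64 bytes of reserved headroom, tagging the call site so leak
 * tracking can attribute the allocation:
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */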

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
	return;
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
	return;
}

static inline void __qdf_nbuf_mod_init(void)
{
	return;
}

static inline void __qdf_nbuf_mod_exit(void)
{
	return;
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are frags,
 * it returns the total length.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenate two nbufs; the new buffer (src) is piggybacked into the older
 * one (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS (status of the call); if the call failed, the src skb
 *         is released
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
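
/*
 * Example (illustrative): concatenating src into dst. Only the payload is
 * copied, so the caller still owns and frees src on success:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */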

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data at the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
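
/*
 * Example (illustrative): appending a 4-byte trailer. On failure the skb
 * has already been freed, so the caller must not touch it again:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb is gone
 *	memcpy(tail, trailer, 4);
 */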

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could check whether the caller is in
 * interrupt context (for example, using "in_irq() || irqs_disabled()")
 * and only use GFP_ATOMIC then, falling back to GFP_KERNEL in the
 * normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the returned skb is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
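
/*
 * Example (illustrative): callers should gate on the capability check so
 * the same code runs on pre-4.14 kernels, where set is a no-op and get
 * returns 0:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 */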

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of skb->data itself
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to the skb data pointer
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This function is used to link up extensions to the head skb. It does not
 * handle linking to the head itself.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, seg2,
 * ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * case of monitor mode AMSDU packet reception, where the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in CE.
 *
 * It overrides pkt_type only if required for 802.3 frames (original
 * ethernet). If the protocol field is less than ETH_P_802_3_MIN (0x600),
 * it is a length and hence an 802.3 frame; otherwise it is an Ethernet
 * Type II frame (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
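
/*
 * Example (illustrative sketch): typical use in a tx path; pkt_type,
 * pkt_subtype and hw_classify are plain locals that the macro updates
 * by name. The initial pkt_type value shown is an assumption, not
 * mandated by this header:
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;	// assumed default
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */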

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
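
/*
 * Example (illustrative): building up and draining a lockless queue;
 * callers provide their own locking if multiple contexts touch it:
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);
 *	while (!__qdf_nbuf_is_queue_empty(&q))
 *		__qdf_nbuf_free(__qdf_nbuf_queue_remove(&q));
 */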


/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain, remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */

static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping the
 *        skb shell intact; in case of failure the skb is released
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping the
 *        skb shell intact; in case of failure the skb is released
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it so its contents
 * can be safely modified without affecting other users.
 *
 * Return: 0 on success, -ENOMEM if it is unable to linearize.
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents
 * can be safely modified without affecting other
 * users. If the nbuf is a clone, this function
 * creates a new copy of the data. If the buffer is not
 * a clone, the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}
1753 
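/*
 * A minimal usage sketch: skb_unshare() may free the original skb and
 * hand back a private copy (or NULL if the copy fails), so callers must
 * continue with the returned pointer only:
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */
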
1754 /**
1755  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1757  *
1758  * Return: true/false
1759  */
1760 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1761 {
1762 	return skb_cloned(skb);
1763 }
1764 
1765 /**
1766  * __qdf_nbuf_pool_init() - init pool
1767  * @net: net handle
1768  *
1769  * Return: QDF status
1770  */
1771 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1772 {
1773 	return QDF_STATUS_SUCCESS;
1774 }
1775 
1776 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
1778  */
1779 #define __qdf_nbuf_pool_delete(osdev)
1780 
1781 /**
 * __qdf_nbuf_expand() - expand both headroom & tailroom. On failure the
 *        skb is released and NULL is returned.
1784  * @skb: sk buff
1785  * @headroom: size of headroom
1786  * @tailroom: size of tailroom
1787  *
1788  * Return: skb or NULL
1789  */
1790 static inline struct sk_buff *
1791 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1792 {
1793 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1794 		return skb;
1795 
1796 	dev_kfree_skb_any(skb);
1797 	return NULL;
1798 }
1799 
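/*
 * A minimal usage sketch (hypothetical header/trailer sizes): grow both
 * ends of the data region in a single reallocation:
 *
 *	skb = __qdf_nbuf_expand(skb, extra_hdr_len, extra_trailer_len);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */
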
1800 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: output for the checksum header offset (unused; see below)
 * @where: output for the checksum insertion point (unused; see below)
 *
 * Not implemented for Linux: this stub asserts and returns false.
 *
 * Return: true/false
1804  */
1805 static inline bool
1806 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1807 			 uint8_t **where)
1808 {
1809 	qdf_assert(0);
1810 	return false;
1811 }
1812 
1813 /**
1814  * __qdf_nbuf_reset_ctxt() - mem zero control block
1815  * @nbuf: buffer
1816  *
1817  * Return: none
1818  */
1819 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1820 {
1821 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1822 }
1823 
1824 /**
1825  * __qdf_nbuf_network_header() - get network header
1826  * @buf: buffer
1827  *
1828  * Return: network header pointer
1829  */
1830 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1831 {
1832 	return skb_network_header(buf);
1833 }
1834 
1835 /**
1836  * __qdf_nbuf_transport_header() - get transport header
1837  * @buf: buffer
1838  *
1839  * Return: transport header pointer
1840  */
1841 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1842 {
1843 	return skb_transport_header(buf);
1844 }
1845 
1846 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 *  passed as part of the network buffer by the network stack
1849  * @skb: sk buff
1850  *
1851  * Return: TCP MSS size
1852  *
1853  */
1854 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1855 {
1856 	return skb_shinfo(skb)->gso_size;
1857 }
1858 
1859 /**
1860  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1861  * @nbuf: sk buff
1862  *
1863  * Return: none
1864  */
1865 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1866 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1869  * @nbuf: sk buff
1870  *
1871  * Return: void ptr
1872  */
1873 static inline void *
1874 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
1875 {
1876 	return (void *)nbuf->cb;
1877 }
1878 
1879 /**
1880  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
1881  * @skb: sk buff
1882  *
1883  * Return: head size
1884  */
1885 static inline size_t
1886 __qdf_nbuf_headlen(struct sk_buff *skb)
1887 {
1888 	return skb_headlen(skb);
1889 }
1890 
1891 /**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1893  * @skb: sk buff
1894  *
1895  * Return: number of fragments
1896  */
1897 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1898 {
1899 	return skb_shinfo(skb)->nr_frags;
1900 }
1901 
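/*
 * A minimal sketch (map() is a hypothetical consumer) of walking a
 * nonlinear skb: the linear head first, then each page fragment:
 *
 *	map(skb->data, __qdf_nbuf_headlen(skb));
 *	for (i = 0; i < __qdf_nbuf_get_nr_frags(skb); i++) {
 *		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		map(skb_frag_address(frag), skb_frag_size(frag));
 *	}
 */
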
1902 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
1905  *
1906  * Return: true/false
1907  */
1908 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
1909 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
1911 }
1912 
1913 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
1916  *
1917  * Return: true/false
1918  */
1919 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
1920 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
1922 }
1923 
1924 /**
1925  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
1926  * @skb: sk buff
1927  *
1928  * Return: size of l2+l3+l4 header length
1929  */
1930 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
1931 {
1932 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
1933 }
1934 
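/*
 * For example, for a typical untagged Ethernet + IPv4 + TCP frame with
 * no options: skb_transport_offset() is 14 + 20 = 34 bytes and
 * tcp_hdrlen() is 20 bytes, so the function returns 54.
 */
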
1935 /**
1936  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
1938  *
1939  * Return:  true/false
1940  */
1941 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
1942 {
	return skb_is_nonlinear(skb);
1947 }
1948 
1949 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
1952  *
1953  * Return: TCP sequence number
1954  */
1955 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
1956 {
1957 	return ntohl(tcp_hdr(skb)->seq);
1958 }
1959 
1960 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
1963  *
1964  * Return: data pointer to typecast into your priv structure
1965  */
1966 static inline uint8_t *
1967 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
1968 {
1969 	return &skb->cb[8];
1970 }
1971 
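/*
 * A minimal usage sketch (struct my_priv is hypothetical): callers cast
 * the returned pointer into their own private layout:
 *
 *	struct my_priv *priv =
 *		(struct my_priv *)__qdf_nbuf_get_priv_ptr(skb);
 */
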
1972 /**
1973  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
1974  * @buf: Pointer to nbuf
1975  *
1976  * Return: None
1977  */
1978 static inline void
1979 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
1980 {
1981 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
1982 }
1983 
1984 /**
1985  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
1988  * @queue_id: Queue id
1989  *
1990  * Return: void
1991  */
1992 static inline void
1993 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
1994 {
1995 	skb_record_rx_queue(skb, queue_id);
1996 }
1997 
1998 /**
1999  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
2002  *
2003  * Return: Queue mapping
2004  */
2005 static inline uint16_t
2006 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2007 {
2008 	return skb->queue_mapping;
2009 }
2010 
2011 /**
2012  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
2015  *
2016  * Return: void
2017  */
2018 static inline void
2019 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2020 {
2021 	__net_timestamp(skb);
2022 }
2023 
2024 /**
2025  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
2028  *
2029  * Return: time difference in ms
2030  */
2031 static inline uint64_t
2032 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2033 {
2034 	return ktime_to_ms(net_timedelta(skb->tstamp));
2035 }
2036 
2037 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2043  */
2044 static inline uint64_t
2045 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2046 {
2047 	return ktime_to_us(net_timedelta(skb->tstamp));
2048 }
2049 
2050 /**
2051  * __qdf_nbuf_orphan() - orphan a nbuf
2052  * @skb: sk buff
2053  *
2054  * If a buffer currently has an owner then we call the
2055  * owner's destructor function
2056  *
2057  * Return: void
2058  */
2059 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2060 {
	skb_orphan(skb);
2062 }
2063 
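/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the queue head
 * @skb_queue_head: skb queue head
 *
 * Return: dequeued skb, or NULL if the queue is empty
 */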
2064 static inline struct sk_buff *
2065 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2066 {
2067 	return skb_dequeue(skb_queue_head);
2068 }
2069 
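/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the queue
 * @skb_queue_head: skb queue head
 *
 * Return: number of buffers in the queue
 */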
2070 static inline
2071 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2072 {
2073 	return skb_queue_head->qlen;
2074 }
2075 
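/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the queue tail
 * @skb_queue_head: skb queue head
 * @skb: skb to enqueue
 *
 * Return: none
 */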
2076 static inline
2077 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2078 					struct sk_buff *skb)
2079 {
	skb_queue_tail(skb_queue_head, skb);
2081 }
2082 
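/**
 * __qdf_nbuf_queue_head_init() - initialize the queue head
 * @skb_queue_head: skb queue head
 *
 * Return: none
 */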
2083 static inline
2084 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2085 {
	skb_queue_head_init(skb_queue_head);
2087 }
2088 
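/**
 * __qdf_nbuf_queue_head_purge() - purge (free) all buffers in the queue
 * @skb_queue_head: skb queue head
 *
 * Return: none
 */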
2089 static inline
2090 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2091 {
	skb_queue_purge(skb_queue_head);
2093 }
2094 
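/*
 * A minimal lifecycle sketch for the queue-head wrappers above
 * (deliver() is a hypothetical consumer):
 *
 *	__qdf_nbuf_queue_head_t pending;
 *
 *	__qdf_nbuf_queue_head_init(&pending);
 *	__qdf_nbuf_queue_head_enqueue_tail(&pending, skb);
 *	while ((skb = __qdf_nbuf_queue_head_dequeue(&pending)))
 *		deliver(skb);
 *	__qdf_nbuf_queue_head_purge(&pending);
 */
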
2095 #ifdef CONFIG_WIN
2096 #include <i_qdf_nbuf_w.h>
2097 #else
2098 #include <i_qdf_nbuf_m.h>
2099 #endif
#endif /* _I_QDF_NBUF_H */
2101