1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf.h
22  * This file provides OS dependent nbuf API's.
23  */
24 
25 #ifndef _I_QDF_NBUF_H
26 #define _I_QDF_NBUF_H
27 
28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/version.h>
33 #include <asm/cacheflush.h>
34 #include <qdf_types.h>
35 #include <qdf_net_types.h>
36 #include <qdf_status.h>
37 #include <qdf_util.h>
38 #include <qdf_mem.h>
39 #include <linux/tcp.h>
40 #include <qdf_util.h>
41 #include <qdf_nbuf_frag.h>
42 #include "qdf_time.h"
43 
44 /*
45  * Use the socket buffer (sk_buff) as the underlying nbuf implementation.
46  * Linux uses sk_buff to represent both the packet and its data, so a
47  * single sk_buff pointer serves as the nbuf handle.
48  */
49 typedef struct sk_buff *__qdf_nbuf_t;
50 
51 /**
52  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
53  *
54  * This is used for skb queue management via linux skb buff head APIs
55  */
56 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
57 
58 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
59 
60 #define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
61 
62 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
63  * max tx fragments added by the driver
64  * The driver will always add one tx fragment (the tx descriptor)
65  */
66 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
67 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
68 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
69 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
70 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
71 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
72 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
73 #define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
74 
75 #define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)
76 
77 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
78 #define IEEE80211_RADIOTAP_HE 23
79 #define IEEE80211_RADIOTAP_HE_MU 24
80 #endif
81 
82 #define IEEE80211_RADIOTAP_HE_MU_OTHER 25
83 
84 #define IEEE80211_RADIOTAP_EXT1_USIG	1
85 #define IEEE80211_RADIOTAP_EXT1_EHT	2
86 
87 /* mark the first packet after wow wakeup */
88 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
89 
90 /* TCP Related MASK */
91 #define QDF_NBUF_PKT_TCPOP_FIN			0x01
92 #define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
93 #define QDF_NBUF_PKT_TCPOP_RST			0x04
94 
95 /*
96  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
97  */
98 typedef union {
99 	uint64_t       u64;
100 	qdf_dma_addr_t dma_addr;
101 } qdf_paddr_t;
102 
103 /**
104  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
105  *                    - data passed between layers of the driver.
106  *
107  * Notes:
108  *   1. Hard limited to 48 bytes. Please count your bytes
109  *   2. The size of this structure has to be easily and consistently
110  *      calculable: do not use any conditional compile flags
111  *   3. Split into a common part followed by a tx/rx overlay
112  *   4. There is only one extra frag, which represents the HTC/HTT header
113  *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
114  *      for the priv_cb_w since it must be at same offset for both
115  *      TX and RX union
116  *   6. "ipa.owned" bit must be first member in both TX and RX unions
117  *      for the priv_cb_m since it must be at same offset for both
118  *      TX and RX union.
119  *
120  * @paddr: physical address retrieved by dma_map of nbuf->data
121  *
122  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
123  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
124  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
125  * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
126  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
127  * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
128  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
129  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
130  *
131  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
132  * @rx.dev.priv_cb_m.flush_ind: flush indication
133  * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool flag
134  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
135  * @rx.dev.priv_cb_m.exc_frm: exception frame
136  * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
137  * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
138  *					sw exception bit from ring desc
139  * @rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
140  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
141  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
142  * @rx.dev.priv_cb_m.lro_ctx: LRO context
143  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
144  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
145  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
146  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
147  *
148  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
149  * @rx.tcp_proto: L4 protocol is TCP
150  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
151  * @rx.ipv6_proto: L3 protocol is IPV6
152  * @rx.ip_offset: offset to IP header
153  * @rx.tcp_offset: offset to TCP header
154  * @rx.rx_ctx_id: Rx context id
155  * @rx.num_elements_in_list: number of elements in the nbuf list
156  *
157  * @rx.tcp_udp_chksum: L4 payload checksum
158  * @rx.tcp_win: TCP window size
159  *
160  * @rx.flow_id: 32bit flow id
161  *
162  * @rx.flag_chfrag_start: first MSDU in an AMSDU
163  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
164  * @rx.flag_chfrag_end: last MSDU in an AMSDU
165  * @rx.flag_retry: flag to indicate MSDU is retried
166  * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
167  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
168  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
169  * @rx.flag_is_frag: flag to indicate skb has frag list
170  * @rx.rsrvd: reserved
171  *
172  * @rx.trace: combined structure for DP and protocol trace
173  * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
174  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
175  * @rx.trace.dp_trace: flag (Datapath trace)
176  * @rx.trace.packet_track: RX_DATA packet
177  * @rx.trace.rsrvd: reserved
178  *
179  * @rx.vdev_id: vdev_id for RX pkt
180  * @rx.is_raw_frame: RAW frame
181  * @rx.fcs_err: FCS error
182  * @rx.tid_val: tid value
183  * @rx.reserved: reserved
184  * @rx.ftype: mcast2ucast, TSO, SG, MESH
185  *
186  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
187  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
188  *
189  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
190  *                 + (1) CE classification enablement bit
191  *                 + (2) packet type (802.3 or Ethernet type II)
192  *                 + (3) packet offset (usually length of HTC/HTT descr)
193  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
194  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
195  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
196  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
197  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
198  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
199  * @tx.dev.priv_cb_m.reserved: reserved
200  *
201  * @tx.ftype: mcast2ucast, TSO, SG, MESH
202  * @tx.vdev_id: vdev (for protocol trace)
203  * @tx.len: length of the extra fragment pointed to by tx.vaddr/tx.paddr
204  *
205  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
206  * @tx.flags.bits.num: number of extra frags (0 or 1)
207  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
208  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
209  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
210  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
211  * @tx.flags.bits.flag_ext_header: extended flags
212  * @tx.flags.bits.is_critical: flag indicating a critical frame
213  * @tx.trace: combined structure for DP and protocol trace
214  * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
215  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
216  * @tx.trace.is_packet_priv:
217  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
218  * @tx.trace.to_fw: Flag to indicate send this packet to FW
219  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
220  *                          + (MGMT_ACTION)] - 4 bits
221  * @tx.trace.dp_trace: flag (Datapath trace)
222  * @tx.trace.is_bcast: flag (Broadcast packet)
223  * @tx.trace.is_mcast: flag (Multicast packet)
224  * @tx.trace.packet_type: flag (Packet type)
225  * @tx.trace.htt2_frm: flag (high-latency path only)
226  * @tx.trace.print: enable packet logging
227  *
228  * @tx.vaddr: virtual address of the extra tx fragment
229  * @tx.paddr: physical/DMA address of the extra tx fragment
230  */
231 struct qdf_nbuf_cb {
232 	/* common */
233 	qdf_paddr_t paddr; /* of skb->data */
234 	/* valid only in one direction */
235 	union {
236 		/* Note: MAX: 40 bytes */
237 		struct {
238 			union {
239 				struct {
240 					void *ext_cb_ptr;
241 					void *fctx;
242 					uint16_t msdu_len : 14,
243 						 flag_intra_bss : 1,
244 						 ipa_smmu_map : 1;
245 					uint16_t peer_id;
246 					uint16_t protocol_tag;
247 					uint16_t flow_tag;
248 				} priv_cb_w;
249 				struct {
250 					/* ipa_owned bit is common between rx
251 					 * control block and tx control block.
252 					 * Do not change location of this bit.
253 					 */
254 					uint32_t ipa_owned:1,
255 						 peer_cached_buf_frm:1,
256 						 flush_ind:1,
257 						 packet_buf_pool:1,
258 						 l3_hdr_pad:3,
259 						 /* exception frame flag */
260 						 exc_frm:1,
261 						 ipa_smmu_map:1,
262 						 reo_dest_ind_or_sw_excpt:5,
263 						 lmac_id:2,
264 						 reserved1:16;
265 					uint32_t tcp_seq_num;
266 					uint32_t tcp_ack_num;
267 					union {
268 						struct {
269 							uint16_t msdu_len;
270 							uint16_t peer_id;
271 						} wifi3;
272 						struct {
273 							uint32_t map_index;
274 						} wifi2;
275 					} dp;
276 					unsigned char *lro_ctx;
277 				} priv_cb_m;
278 			} dev;
279 			uint32_t lro_eligible:1,
280 				tcp_proto:1,
281 				tcp_pure_ack:1,
282 				ipv6_proto:1,
283 				ip_offset:7,
284 				tcp_offset:7,
285 				rx_ctx_id:4,
286 				fcs_err:1,
287 				is_raw_frame:1,
288 				num_elements_in_list:8;
289 			uint32_t tcp_udp_chksum:16,
290 				 tcp_win:16;
291 			uint32_t flow_id;
292 			uint8_t flag_chfrag_start:1,
293 				flag_chfrag_cont:1,
294 				flag_chfrag_end:1,
295 				flag_retry:1,
296 				flag_da_mcbc:1,
297 				flag_da_valid:1,
298 				flag_sa_valid:1,
299 				flag_is_frag:1;
300 			union {
301 				uint8_t packet_state;
302 				uint8_t dp_trace:1,
303 					packet_track:3,
304 					rsrvd:4;
305 			} trace;
306 			uint16_t vdev_id:8,
307 				 tid_val:4,
308 				 ftype:4;
309 		} rx;
310 
311 		/* Note: MAX: 40 bytes */
312 		struct {
313 			union {
314 				struct {
315 					void *ext_cb_ptr;
316 					void *fctx;
317 				} priv_cb_w;
318 				struct {
319 					/* ipa_owned bit is common between rx
320 					 * control block and tx control block.
321 					 * Do not change location of this bit.
322 					 */
323 					struct {
324 						uint32_t owned:1,
325 							priv:31;
326 					} ipa;
327 					uint32_t data_attr;
328 					uint16_t desc_id;
329 					uint16_t mgmt_desc_id;
330 					struct {
331 						uint8_t bi_map:1,
332 							reserved:7;
333 					} dma_option;
334 					uint8_t flag_notify_comp:1,
335 						rsvd:7;
336 					uint8_t reserved[2];
337 				} priv_cb_m;
338 			} dev;
339 			uint8_t ftype;
340 			uint8_t vdev_id;
341 			uint16_t len;
342 			union {
343 				struct {
344 					uint8_t flag_efrag:1,
345 						flag_nbuf:1,
346 						num:1,
347 						flag_chfrag_start:1,
348 						flag_chfrag_cont:1,
349 						flag_chfrag_end:1,
350 						flag_ext_header:1,
351 						is_critical:1;
352 				} bits;
353 				uint8_t u8;
354 			} flags;
355 			struct {
356 				uint8_t packet_state:7,
357 					is_packet_priv:1;
358 				uint8_t packet_track:3,
359 					to_fw:1,
360 					proto_type:4;
361 				uint8_t dp_trace:1,
362 					is_bcast:1,
363 					is_mcast:1,
364 					packet_type:3,
365 					/* used only for hl*/
366 					htt2_frm:1,
367 					print:1;
368 			} trace;
369 			unsigned char *vaddr;
370 			qdf_paddr_t paddr;
371 		} tx;
372 	} u;
373 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
374 
375 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
376 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
377 			(sizeof(struct qdf_nbuf_cb)) <=
378 			sizeof_field(struct sk_buff, cb));
379 #else
380 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
381 			(sizeof(struct qdf_nbuf_cb)) <=
382 			FIELD_SIZEOF(struct sk_buff, cb));
383 #endif
384 
385 /**
386  *  access macros to qdf_nbuf_cb
387  *  Note: These macros can be used as L-values as well as R-values.
388  *        When used as R-values, they effectively function as "get" macros
389  *        When used as L-values, they effectively function as "set" macros
390  */
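
/*
 * Example (illustrative sketch, not itself part of the API): because each
 * accessor expands to a struct-member lvalue, the same macro serves both
 * directions:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;              // used as "set"
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);    // used as "get"
 */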
391 
392 #define QDF_NBUF_CB_PADDR(skb) \
393 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
394 
395 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
396 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
397 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
398 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
399 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
400 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
401 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
402 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
403 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
404 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
405 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
406 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
407 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
408 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
409 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
410 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
411 
412 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
413 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
414 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
415 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
416 
417 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
418 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
419 
420 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
421 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
422 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
423 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
424 
425 #define QDF_NBUF_CB_RX_FTYPE(skb) \
426 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
427 
428 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
429 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
430 
431 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
432 	(((struct qdf_nbuf_cb *) \
433 	((skb)->cb))->u.rx.flag_chfrag_start)
434 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
435 	(((struct qdf_nbuf_cb *) \
436 	((skb)->cb))->u.rx.flag_chfrag_cont)
437 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
438 		(((struct qdf_nbuf_cb *) \
439 		((skb)->cb))->u.rx.flag_chfrag_end)
440 
441 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
442 	(((struct qdf_nbuf_cb *) \
443 	((skb)->cb))->u.rx.flag_da_mcbc)
444 
445 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
446 	(((struct qdf_nbuf_cb *) \
447 	((skb)->cb))->u.rx.flag_da_valid)
448 
449 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
450 	(((struct qdf_nbuf_cb *) \
451 	((skb)->cb))->u.rx.flag_sa_valid)
452 
453 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
454 	(((struct qdf_nbuf_cb *) \
455 	((skb)->cb))->u.rx.flag_retry)
456 
457 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
458 	(((struct qdf_nbuf_cb *) \
459 	((skb)->cb))->u.rx.is_raw_frame)
460 
461 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
462 	(((struct qdf_nbuf_cb *) \
463 	((skb)->cb))->u.rx.tid_val)
464 
465 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
466 	(((struct qdf_nbuf_cb *) \
467 	((skb)->cb))->u.rx.flag_is_frag)
468 
469 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
470 	(((struct qdf_nbuf_cb *) \
471 	((skb)->cb))->u.rx.fcs_err)
472 
473 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
474 	qdf_nbuf_set_state(skb, PACKET_STATE)
475 
476 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
477 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
478 
479 #define QDF_NBUF_CB_TX_FTYPE(skb) \
480 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
481 
482 
483 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
484 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
485 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
486 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
487 
488 /* Tx Flags Accessor Macros*/
489 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
490 	(((struct qdf_nbuf_cb *) \
491 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
492 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
493 	(((struct qdf_nbuf_cb *) \
494 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
495 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
496 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
497 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
498 	(((struct qdf_nbuf_cb *) \
499 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
500 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
501 	(((struct qdf_nbuf_cb *) \
502 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
503 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
504 		(((struct qdf_nbuf_cb *) \
505 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
506 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
507 		(((struct qdf_nbuf_cb *) \
508 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
509 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
510 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
511 
512 #define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
513 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
514 /* End of Tx Flags Accessor Macros */
515 
516 /* Tx trace accessor macros */
517 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
518 	(((struct qdf_nbuf_cb *) \
519 		((skb)->cb))->u.tx.trace.packet_state)
520 
521 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
522 	(((struct qdf_nbuf_cb *) \
523 		((skb)->cb))->u.tx.trace.is_packet_priv)
524 
525 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
526 	(((struct qdf_nbuf_cb *) \
527 		((skb)->cb))->u.tx.trace.packet_track)
528 
529 #define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
530 	(((struct qdf_nbuf_cb *) \
531 		((skb)->cb))->u.tx.trace.to_fw)
532 
533 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
534 		(((struct qdf_nbuf_cb *) \
535 			((skb)->cb))->u.rx.trace.packet_track)
536 
537 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
538 	(((struct qdf_nbuf_cb *) \
539 		((skb)->cb))->u.tx.trace.proto_type)
540 
541 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
542 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
543 
544 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
545 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
546 
547 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
548 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
549 
550 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
551 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
552 
553 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
554 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
555 
556 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
557 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
558 
559 #define QDF_NBUF_CB_SET_BCAST(skb) \
560 	(((struct qdf_nbuf_cb *) \
561 		((skb)->cb))->u.tx.trace.is_bcast = true)
562 
563 #define QDF_NBUF_CB_SET_MCAST(skb) \
564 	(((struct qdf_nbuf_cb *) \
565 		((skb)->cb))->u.tx.trace.is_mcast = true)
566 /* End of Tx trace accessor macros */
567 
568 
569 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
570 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
571 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
572 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
573 
574 /* assume the OS provides a single fragment */
575 #define __qdf_nbuf_get_num_frags(skb)		   \
576 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
577 
578 #define __qdf_nbuf_reset_num_frags(skb) \
579 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
580 
581 /**
582  *   end of nbuf->cb access macros
583  */
584 
585 typedef void (*qdf_nbuf_trace_update_t)(char *);
586 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
587 
588 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
589 
590 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
591 	(QDF_NBUF_CB_PADDR(skb) = paddr)
592 
593 #define __qdf_nbuf_frag_push_head(					\
594 	skb, frag_len, frag_vaddr, frag_paddr)				\
595 	do {					\
596 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
597 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
598 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
599 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
600 	} while (0)
601 
602 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
603 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
604 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
605 
606 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
607 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
608 
609 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
610 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
611 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
612 	 /* assume that the OS only provides a single fragment */	\
613 	 QDF_NBUF_CB_PADDR(skb))
614 
615 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
616 
617 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
618 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
619 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
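
/*
 * Example (illustrative, with hypothetical desc_* values): a driver pushes
 * its HTC/HTT descriptor as the single extra fragment, after which the
 * getters report two fragments, descriptor first, skb data second:
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	int n = __qdf_nbuf_get_num_frags(skb);               // == 2
 *	unsigned char *d = __qdf_nbuf_get_frag_vaddr(skb, 0); // descriptor
 *	unsigned char *p = __qdf_nbuf_get_frag_vaddr(skb, 1); // skb->data
 */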
620 
621 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
622 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
623 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
624 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
625 
626 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
627 	do {								\
628 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
629 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
630 		if (frag_num)						\
631 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
632 							      is_wstrm; \
633 		else					\
634 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
635 							      is_wstrm; \
636 	} while (0)
637 
638 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
639 	do { \
640 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
641 	} while (0)
642 
643 #define __qdf_nbuf_get_vdev_ctx(skb) \
644 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
645 
646 #define __qdf_nbuf_set_tx_ftype(skb, type) \
647 	do { \
648 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
649 	} while (0)
650 
651 #define __qdf_nbuf_get_tx_ftype(skb) \
652 		 QDF_NBUF_CB_TX_FTYPE((skb))
653 
654 
655 #define __qdf_nbuf_set_rx_ftype(skb, type) \
656 	do { \
657 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
658 	} while (0)
659 
660 #define __qdf_nbuf_get_rx_ftype(skb) \
661 		 QDF_NBUF_CB_RX_FTYPE((skb))
662 
663 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
664 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
665 
666 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
667 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
668 
669 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
670 	do { \
671 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
672 	} while (0)
673 
674 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
675 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
676 
677 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
678 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
679 
680 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
681 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
682 
683 #define __qdf_nbuf_set_da_mcbc(skb, val) \
684 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
685 
686 #define __qdf_nbuf_is_da_mcbc(skb) \
687 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
688 
689 #define __qdf_nbuf_set_da_valid(skb, val) \
690 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
691 
692 #define __qdf_nbuf_is_da_valid(skb) \
693 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
694 
695 #define __qdf_nbuf_set_sa_valid(skb, val) \
696 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
697 
698 #define __qdf_nbuf_is_sa_valid(skb) \
699 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
700 
701 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
702 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
703 
704 #define __qdf_nbuf_is_rx_retry_flag(skb) \
705 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
706 
707 #define __qdf_nbuf_set_raw_frame(skb, val) \
708 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
709 
710 #define __qdf_nbuf_is_raw_frame(skb) \
711 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
712 
713 #define __qdf_nbuf_get_tid_val(skb) \
714 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
715 
716 #define __qdf_nbuf_set_tid_val(skb, val) \
717 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
718 
719 #define __qdf_nbuf_set_is_frag(skb, val) \
720 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
721 
722 #define __qdf_nbuf_is_frag(skb) \
723 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
724 
725 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
726 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
727 
728 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
729 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
730 
731 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
732 	do { \
733 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
734 	} while (0)
735 
736 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
737 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
738 
739 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
740 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
741 
742 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
743 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
744 
745 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
746 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
747 
748 #define __qdf_nbuf_trace_get_proto_type(skb) \
749 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
750 
751 #define __qdf_nbuf_data_attr_get(skb)		\
752 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
753 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
754 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
755 
756 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
757 		skb_queue_walk_safe(queue, var, tvar)
758 
759 /**
760  * __qdf_nbuf_num_frags_init() - init extra frags
761  * @skb: sk buffer
762  *
763  * Return: none
764  */
765 static inline
766 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
767 {
768 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
769 }
770 
771 /*
772  * prototypes. Implemented in qdf_nbuf.c
773  */
774 
775 /**
776  * __qdf_nbuf_alloc() - Allocate nbuf
777  * @osdev: Device handle
778  * @size: Netbuf requested size
779  * @reserve: headroom to start with
780  * @align: Align
781  * @prio: Priority
782  * @func: Function name of the call site
783  * @line: line number of the call site
784  *
785  * This allocates an nbuf, aligns it if needed, and reserves some space at
786  * the front. Since the reserve is done after alignment, an unaligned
787  * reserve value will result in an unaligned data address.
788  *
789  * Return: nbuf or %NULL if no memory
790  */
791 __qdf_nbuf_t
792 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
793 		 int prio, const char *func, uint32_t line);
794 
795 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
796 				     const char *func, uint32_t line);
797 
798 /**
799  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
800  * @size: Size to be allocated for skb
801  * @reserve: Reserve headroom size
802  * @align: Align data
803  * @func: Function name of the call site
804  * @line: Line number of the callsite
805  *
806  * This API allocates an nbuf, aligns it if needed, and reserves headroom
807  * after alignment; the nbuf is not taken from the skb recycler pool.
808  *
809  * Return: Allocated nbuf pointer
810  */
811 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
812 					  const char *func, uint32_t line);
813 
814 /**
815  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
816  * @nbuf: Pointer to network buffer
817  *
818  * If GFP_ATOMIC is overkill, we could check whether the clone is
819  * requested from interrupt context and only then use GFP_ATOMIC,
820  * falling back to GFP_KERNEL in the normal case,
821  *
822  * for example by checking "in_irq() || irqs_disabled()"
823  *
824  * Return: cloned skb
825  */
826 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
827 
828 void __qdf_nbuf_free(struct sk_buff *skb);
829 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
830 			struct sk_buff *skb, qdf_dma_dir_t dir);
831 void __qdf_nbuf_unmap(__qdf_device_t osdev,
832 			struct sk_buff *skb, qdf_dma_dir_t dir);
833 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
834 				 struct sk_buff *skb, qdf_dma_dir_t dir);
835 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
836 			struct sk_buff *skb, qdf_dma_dir_t dir);
837 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
838 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
839 
840 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
841 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
842 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
843 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
844 	qdf_dma_dir_t dir, int nbytes);
845 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
846 	qdf_dma_dir_t dir, int nbytes);
847 
848 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
849 	qdf_dma_dir_t dir);
850 
851 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
852 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
853 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
854 QDF_STATUS __qdf_nbuf_frag_map(
855 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
856 	int offset, qdf_dma_dir_t dir, int cur_frag);
857 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
858 
859 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
860 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
861 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
862 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
863 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
864 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
865 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
866 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
867 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
868 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
869 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
870 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
871 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
872 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
873 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
874 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
875 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
876 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
877 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
878 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
879 bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
880 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
881 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
882 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
883 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
884 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
885 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
886 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
887 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
888 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
889 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
890 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
891 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
892 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
893 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
894 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
895 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
896 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
897 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
898 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
899 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
900 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
901 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
902 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
903 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
904 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
905 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
906 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
907 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
908 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
909 
910 #ifdef QDF_NBUF_GLOBAL_COUNT
911 int __qdf_nbuf_count_get(void);
912 void __qdf_nbuf_count_inc(struct sk_buff *skb);
913 void __qdf_nbuf_count_dec(struct sk_buff *skb);
914 void __qdf_nbuf_mod_init(void);
915 void __qdf_nbuf_mod_exit(void);
916 
917 #else
918 
919 static inline int __qdf_nbuf_count_get(void)
920 {
921 	return 0;
922 }
923 
924 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
925 {
926 	return;
927 }
928 
929 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
930 {
931 	return;
932 }
933 
934 static inline void __qdf_nbuf_mod_init(void)
935 {
936 	return;
937 }
938 
939 static inline void __qdf_nbuf_mod_exit(void)
940 {
941 	return;
942 }
943 #endif
944 
945 /**
946  * __qdf_to_status() - OS to QDF status conversion
947  * @error: OS error
948  *
949  * Return: QDF status
950  */
951 static inline QDF_STATUS __qdf_to_status(signed int error)
952 {
953 	switch (error) {
954 	case 0:
955 		return QDF_STATUS_SUCCESS;
956 	case ENOMEM:
957 	case -ENOMEM:
958 		return QDF_STATUS_E_NOMEM;
959 	default:
960 		return QDF_STATUS_E_NOSUPPORT;
961 	}
962 }
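
/*
 * Example (illustrative): translate the result of a kernel helper, e.g.
 *
 *	status = __qdf_to_status(pskb_expand_head(skb, 0, len, GFP_ATOMIC));
 *	// 0 -> QDF_STATUS_SUCCESS, -ENOMEM -> QDF_STATUS_E_NOMEM
 */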
963 
964 /**
965  * __qdf_nbuf_len() - return the amount of valid data in the skb
966  * @skb: Pointer to network buffer
967  *
968  * This API returns the amount of valid data in the skb. If extra
969  * fragments are present, their length is included in the total.
970  *
971  * Return: network buffer length
972  */
973 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
974 {
975 	int i, extra_frag_len = 0;
976 
977 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
978 	if (i > 0)
979 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
980 
981 	return extra_frag_len + skb->len;
982 }
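
/*
 * Worked example (hypothetical numbers): with skb->len == 100 and one
 * extra fragment of 16 bytes pushed via __qdf_nbuf_frag_push_head(),
 * __qdf_nbuf_len() returns 116.
 */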
983 
984 /**
985  * __qdf_nbuf_cat() - link two nbufs
986  * @dst: Buffer to piggyback into
987  * @src: Buffer to put
988  *
989  * Concatenate two nbufs: the data of the new buffer (src) is appended to
990  * the older one (dst). It is the caller's responsibility to free the src
991  * skb, whether the call succeeds or fails.
992  *
993  * Return: QDF_STATUS of the call
994  */
995 static inline QDF_STATUS
996 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
997 {
998 	QDF_STATUS error = 0;
999 
1000 	qdf_assert(dst && src);
1001 
1002 	/*
1003 	 * Since pskb_expand_head unconditionally reallocates the skb->head
1004 	 * buffer, first check whether the current buffer is already large
1005 	 * enough.
1006 	 */
1007 	if (skb_tailroom(dst) < src->len) {
1008 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
1009 		if (error)
1010 			return __qdf_to_status(error);
1011 	}
1012 
1013 	memcpy(skb_tail_pointer(dst), src->data, src->len);
1014 	skb_put(dst, src->len);
1015 	return __qdf_to_status(error);
1016 }
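
/*
 * Example (illustrative): append src onto dst; src stays owned by the
 * caller whatever the outcome, so it is freed unconditionally here:
 *
 *	status = __qdf_nbuf_cat(dst, src);
 *	dev_kfree_skb_any(src);
 */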
1017 
1018 /*
1019  * nbuf manipulation routines
1020  */
1021 /**
1022  * __qdf_nbuf_headroom() - return the amount of head space available
1023  * @skb: Pointer to network buffer
1024  *
1025  * Return: amount of head room
1026  */
1027 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
1028 {
1029 	return skb_headroom(skb);
1030 }
1031 
1032 /**
1033  * __qdf_nbuf_tailroom() - return the amount of tail space available
1034  * @skb: Pointer to network buffer
1035  *
1036  * Return: amount of tail room
1037  */
1038 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
1039 {
1040 	return skb_tailroom(skb);
1041 }
1042 
1043 /**
1044  * __qdf_nbuf_put_tail() - Puts data in the end
1045  * @skb: Pointer to network buffer
1046  * @size: size to be pushed
1047  *
1048  * Return: data pointer of this buf where new data has to be
1049  *         put, or NULL if there is not enough room in this buf.
1050  */
1051 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
1052 {
1053 	if (skb_tailroom(skb) < size) {
1054 		if (unlikely(pskb_expand_head(skb, 0,
1055 			size - skb_tailroom(skb), GFP_ATOMIC))) {
1056 			dev_kfree_skb_any(skb);
1057 			return NULL;
1058 		}
1059 	}
1060 	return skb_put(skb, size);
1061 }
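
/*
 * Example (illustrative): make room for a trailer and copy it in; a NULL
 * return means the head expansion failed and the skb was already freed:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, sizeof(trailer));
 *
 *	if (tail)
 *		memcpy(tail, &trailer, sizeof(trailer));
 */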
1062 
1063 /**
1064  * __qdf_nbuf_trim_tail() - trim data out from the end
1065  * @skb: Pointer to network buffer
1066  * @size: size to be popped
1067  *
1068  * Return: none
1069  */
1070 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
1071 {
1072 	skb_trim(skb, skb->len - size);
1073 }
1074 
1075 
1076 /*
1077  * prototypes. Implemented in qdf_nbuf.c
1078  */
1079 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
1080 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
1081 				qdf_nbuf_rx_cksum_t *cksum);
1082 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
1083 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
1084 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
1085 void __qdf_nbuf_ref(struct sk_buff *skb);
1086 int __qdf_nbuf_shared(struct sk_buff *skb);
1087 
1088 /**
1089  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1090  * @skb: sk buff
1091  *
1092  * Return: number of fragments
1093  */
1094 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1095 {
1096 	return skb_shinfo(skb)->nr_frags;
1097 }
1098 
1099 /**
1100  * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
1101  * @skb: sk buff
1102  *
1103  * This API returns a total number of fragments from the fraglist
1104  * Return: total number of fragments
1105  */
1106 static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
1107 {
1108 	uint32_t num_frag = 0;
1109 	struct sk_buff *list = NULL;
1110 
1111 	num_frag = skb_shinfo(skb)->nr_frags;
1112 	skb_walk_frags(skb, list)
1113 		num_frag += skb_shinfo(list)->nr_frags;
1114 
1115 	return num_frag;
1116 }
1117 
1118 /*
1119  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1120  */
1121 #define __qdf_nbuf_pool_delete(osdev)
1122 
1123 /**
1124  * __qdf_nbuf_copy() - returns a private copy of the skb
1125  * @skb: Pointer to network buffer
1126  *
1127  * This API returns a private copy of the skb. The skb returned is
1128  * completely modifiable by callers.
1129  *
1130  * Return: skb or NULL
1131  */
1132 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1133 {
1134 	struct sk_buff *skb_new = NULL;
1135 
1136 	skb_new = skb_copy(skb, GFP_ATOMIC);
1137 	if (skb_new) {
1138 		__qdf_nbuf_count_inc(skb_new);
1139 	}
1140 	return skb_new;
1141 }
1142 
1143 #define __qdf_nbuf_reserve      skb_reserve
1144 
1145 /**
1146  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1147  * @skb: Pointer to network buffer
1148  * @data: data pointer
1149  *
1150  * Return: none
1151  */
1152 static inline void
1153 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1154 {
1155 	skb->data = data;
1156 }
1157 
1158 /**
1159  * __qdf_nbuf_set_len() - set buffer data length
1160  * @skb: Pointer to network buffer
1161  * @len: data length
1162  *
1163  * Return: none
1164  */
1165 static inline void
1166 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1167 {
1168 	skb->len = len;
1169 }
1170 
1171 /**
1172  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1173  * @skb: Pointer to network buffer
1174  * @len: skb data length
1175  *
1176  * Return: none
1177  */
1178 static inline void
1179 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1180 {
1181 	skb_set_tail_pointer(skb, len);
1182 }
1183 
1184 /**
1185  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1186  * @skb: Pointer to network buffer
1187  * @list: list to use
1188  *
1189  * This is a lockless version, driver must acquire locks if it
1190  * needs to synchronize
1191  *
1192  * Return: none
1193  */
1194 static inline void
1195 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1196 {
1197 	__skb_unlink(skb, list);
1198 }
1199 
1200 /**
1201  * __qdf_nbuf_reset() - reset the buffer data and pointer
1202  * @buf: Network buf instance
1203  * @reserve: reserve
1204  * @align: align
1205  *
1206  * Return: none
1207  */
1208 static inline void
1209 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1210 {
1211 	int offset;
1212 
1213 	skb_push(skb, skb_headroom(skb));
1214 	skb_put(skb, skb_tailroom(skb));
1215 	memset(skb->data, 0x0, skb->len);
1216 	skb_trim(skb, 0);
1217 	skb_reserve(skb, NET_SKB_PAD);
1218 	memset(skb->cb, 0x0, sizeof(skb->cb));
1219 
1220 	/*
1221 	 * The default is for netbuf fragments to be interpreted
1222 	 * as wordstreams rather than bytestreams.
1223 	 */
1224 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1225 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1226 
1227 	/*
1228 	 * Align & make sure that the tail & data are adjusted properly
1229 	 */
1230 
1231 	if (align) {
1232 		offset = ((unsigned long)skb->data) % align;
1233 		if (offset)
1234 			skb_reserve(skb, align - offset);
1235 	}
1236 
1237 	skb_reserve(skb, reserve);
1238 }
1239 
1240 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1241 /**
1242  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network buffer
1243  *                                       in kernel
1244  *
1245  * Return: true if dev_scratch is supported
1246  *         false if dev_scratch is not supported
1247  */
1248 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1249 {
1250 	return true;
1251 }
1252 
1253 /**
1254  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1255  * @skb: Pointer to network buffer
1256  *
1257  * Return: dev_scratch if dev_scratch supported
1258  *         0 if dev_scratch not supported
1259  */
1260 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1261 {
1262 	return skb->dev_scratch;
1263 }
1264 
1265 /**
1266  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1267  * @skb: Pointer to network buffer
1268  * @value: value to be set in dev_scratch of network buffer
1269  *
1270  * Return: void
1271  */
1272 static inline void
1273 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1274 {
1275 	skb->dev_scratch = value;
1276 }
1277 #else
1278 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1279 {
1280 	return false;
1281 }
1282 
1283 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1284 {
1285 	return 0;
1286 }
1287 
1288 static inline void
1289 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1290 {
1291 }
1292 #endif /* KERNEL_VERSION(4, 14, 0) */
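
/*
 * Example (illustrative): stash a per-packet driver cookie in dev_scratch
 * when the kernel provides it; the getter degrades to 0 otherwise:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	...
 *	cookie = __qdf_nbuf_get_dev_scratch(skb);
 */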
1293 
1294 /**
1295  * __qdf_nbuf_head() - return the skb's head pointer
1296  * @skb: Pointer to network buffer
1297  *
1298  * Return: Pointer to head buffer
1299  */
1300 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1301 {
1302 	return skb->head;
1303 }
1304 
1305 /**
1306  * __qdf_nbuf_data() - return the pointer to data header in the skb
1307  * @skb: Pointer to network buffer
1308  *
1309  * Return: Pointer to skb data
1310  */
1311 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1312 {
1313 	return skb->data;
1314 }
1315 
1316 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1317 {
1318 	return (uint8_t *)&skb->data;
1319 }
1320 
1321 /**
1322  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1323  * @skb: Pointer to network buffer
1324  *
1325  * Return: skb protocol
1326  */
1327 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1328 {
1329 	return skb->protocol;
1330 }
1331 
1332 /**
1333  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1334  * @skb: Pointer to network buffer
1335  *
1336  * Return: skb ip_summed
1337  */
1338 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1339 {
1340 	return skb->ip_summed;
1341 }
1342 
1343 /**
1344  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1345  * @skb: Pointer to network buffer
1346  * @ip_summed: ip checksum
1347  *
1348  * Return: none
1349  */
1350 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1351 		 uint8_t ip_summed)
1352 {
1353 	skb->ip_summed = ip_summed;
1354 }
1355 
1356 /**
1357  * __qdf_nbuf_get_priority() - return the priority value of the skb
1358  * @skb: Pointer to network buffer
1359  *
1360  * Return: skb priority
1361  */
1362 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1363 {
1364 	return skb->priority;
1365 }
1366 
1367 /**
1368  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1369  * @skb: Pointer to network buffer
1370  * @p: priority
1371  *
1372  * Return: none
1373  */
1374 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1375 {
1376 	skb->priority = p;
1377 }
1378 
1379 /**
1380  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1381  * @skb: Current skb
1382  * @skb_next: Next skb
1383  *
1384  * Return: void
1385  */
1386 static inline void
1387 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1388 {
1389 	skb->next = skb_next;
1390 }
1391 
1392 /**
1393  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1394  * @skb: Current skb
1395  *
1396  * Return: the next skb pointed to by the current skb
1397  */
1398 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1399 {
1400 	return skb->next;
1401 }
1402 
1403 /**
1404  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1405  * @skb: Current skb
1406  * @skb_next: Next skb
1407  *
1408  * This fn is used to link up extensions to the head skb. Does not handle
1409  * linking to the head
1410  *
1411  * Return: none
1412  */
1413 static inline void
1414 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1415 {
1416 	skb->next = skb_next;
1417 }
1418 
1419 /**
1420  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1421  * @skb: Current skb
1422  *
1423  * Return: the next skb pointed to by the current skb
1424  */
1425 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1426 {
1427 	return skb->next;
1428 }
1429 
1430 /**
1431  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1432  * @skb_head: head_buf nbuf holding head segment (single)
1433  * @ext_list: nbuf list holding linked extensions to the head
1434  * @ext_len: Total length of all buffers in the extension list
1435  *
1436  * This function is used to link up a list of packet extensions (seg1, seg2, ...)
1437  * to the nbuf holding the head segment (seg0)
1438  *
1439  * Return: none
1440  */
1441 static inline void
1442 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1443 			struct sk_buff *ext_list, size_t ext_len)
1444 {
1445 	skb_shinfo(skb_head)->frag_list = ext_list;
1446 	skb_head->data_len += ext_len;
1447 	skb_head->len += ext_len;
1448 }
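
/*
 * Example (illustrative): stitch two continuation segments onto a head
 * segment; the head's len and data_len grow by the combined length:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 */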
1449 
1450 /**
1451  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1452  * @head_buf: Network buf holding head segment (single)
1453  *
1454  * This ext_list is populated for jumbo packets, e.g. in monitor mode AMSDU
1455  * reception, where the segments are stitched together using the frag_list.
1456  *
1457  * Return: Network buf list holding linked extensions from head buf.
1458  */
1459 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1460 {
1461 	return (skb_shinfo(head_buf)->frag_list);
1462 }
1463 
1464 /**
1465  * __qdf_nbuf_get_age() - return the checksum value of the skb
1466  * @skb: Pointer to network buffer
1467  *
1468  * Return: checksum value
1469  */
1470 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1471 {
1472 	return skb->csum;
1473 }
1474 
1475 /**
1476  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1477  * @skb: Pointer to network buffer
1478  * @v: Value
1479  *
1480  * Return: none
1481  */
1482 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1483 {
1484 	skb->csum = v;
1485 }
1486 
1487 /**
1488  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1489  * @skb: Pointer to network buffer
1490  * @adj: Adjustment value
1491  *
1492  * Return: none
1493  */
1494 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1495 {
1496 	skb->csum -= adj;
1497 }
1498 
1499 /**
1500  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1501  * @skb: Pointer to network buffer
1502  * @offset: Offset value
1503  * @len: Length
1504  * @to: Destination pointer
1505  *
1506  * Return: length of the copy bits for skb
1507  */
1508 static inline int32_t
1509 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1510 {
1511 	return skb_copy_bits(skb, offset, to, len);
1512 }
1513 
1514 /**
1515  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1516  * @skb: Pointer to network buffer
1517  * @len:  Packet length
1518  *
1519  * Return: none
1520  */
1521 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1522 {
1523 	if (skb->len > len) {
1524 		skb_trim(skb, len);
1525 	} else {
1526 		if (skb_tailroom(skb) < len - skb->len) {
1527 			if (unlikely(pskb_expand_head(skb, 0,
1528 				len - skb->len - skb_tailroom(skb),
1529 				GFP_ATOMIC))) {
1530 				QDF_DEBUG_PANIC(
1531 				   "SKB tailroom is less than requested length."
1532 				   " tail-room: %u, len: %u, skb->len: %u",
1533 				   skb_tailroom(skb), len, skb->len);
1534 				dev_kfree_skb_any(skb);
1535 			}
1536 		}
1537 		skb_put(skb, (len - skb->len));
1538 	}
1539 }
1540 
1541 /**
1542  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1543  * @skb: Pointer to network buffer
1544  * @protocol: Protocol type
1545  *
1546  * Return: none
1547  */
1548 static inline void
1549 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1550 {
1551 	skb->protocol = protocol;
1552 }
1553 
1554 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1555 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1556 
1557 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1558 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1559 
1560 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1561 				      uint32_t *lo, uint32_t *hi);
1562 
1563 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1564 	struct qdf_tso_info_t *tso_info);
1565 
1566 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1567 			  struct qdf_tso_seg_elem_t *tso_seg,
1568 			  bool is_last_seg);
1569 
1570 #ifdef FEATURE_TSO
1571 /**
1572  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1573  *                                    payload len
1574  * @skb: buffer
1575  *
1576  * Return: size
1577  */
1578 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1579 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1580 
1581 #else
1582 static inline
1583 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
1584 {
1585 	return 0;
1586 }
1587 
1588 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1589 {
1590 	return 0;
1591 }
1592 
1593 #endif /* FEATURE_TSO */
1594 
1595 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1596 {
1597 	if (skb_is_gso(skb) &&
1598 		(skb_is_gso_v6(skb) ||
1599 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1600 		return true;
1601 	else
1602 		return false;
1603 }
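
/*
 * Example (illustrative): gate the TSO transmit path on the skb's GSO
 * state before computing segments:
 *
 *	if (__qdf_nbuf_is_tso(skb))
 *		num_seg = __qdf_nbuf_get_tso_num_seg(skb);
 */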
1604 
1605 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1606 
1607 int __qdf_nbuf_get_users(struct sk_buff *skb);
1608 
1609 /**
1610  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1611  *			      and get hw_classify by peeking
1612  *			      into packet
1613  * @nbuf:		Network buffer (skb on Linux)
1614  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1615  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1616  *			needs to be set in case of CE classification support
1617  *			Is set by this macro.
1618  * @hw_classify:	This is a flag which is set to indicate
1619  *			CE classification is enabled.
1620  *			Do not set this bit for VLAN packets
1621  *			OR for mcast / bcast frames.
1622  *
1623  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1624  * whether to enable tx_classify bit in CE.
1625  *
1626  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1627  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1628  * it is the length and a 802.3 frame else it is Ethernet Type II
1629  * (RFC 894).
1630  * Bit 4 in pkt_subtype is the tx_classify bit
1631  *
1632  * Return:	void
1633  */
1634 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1635 				pkt_subtype, hw_classify)	\
1636 do {								\
1637 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1638 	uint16_t ether_type = ntohs(eh->h_proto);		\
1639 	bool is_mc_bc;						\
1640 								\
1641 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1642 		   is_multicast_ether_addr((uint8_t *)eh);	\
1643 								\
1644 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1645 		hw_classify = 1;				\
1646 		pkt_subtype = 0x01 <<				\
1647 			HTT_TX_CLASSIFY_BIT_S;			\
1648 	}							\
1649 								\
1650 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1651 		pkt_type = htt_pkt_type_ethernet;		\
1652 								\
1653 } while (0)
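
/*
 * Example (hypothetical call site; htt_pkt_type_eth2 is an assumed
 * initial classification): the macro may override pkt_type and set the
 * classify bit in pkt_subtype:
 *
 *	uint8_t type = htt_pkt_type_eth2, subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, type, subtype, hw_classify);
 */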
1654 
1655 /**
1656  * nbuf private buffer routines
1657  */
1658 
1659 /**
1660  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1661  * @skb: Pointer to network buffer
1662  * @addr: Pointer to store header's addr
1663  * @len: network buffer length
1664  *
1665  * Return: none
1666  */
1667 static inline void
1668 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1669 {
1670 	*addr = skb->data;
1671 	*len = skb->len;
1672 }
1673 
1674 /**
1675  * typedef __qdf_nbuf_queue_t - network buffer queue
1676  * @head: Head pointer
1677  * @tail: Tail pointer
1678  * @qlen: Queue length
1679  */
1680 typedef struct __qdf_nbuf_qhead {
1681 	struct sk_buff *head;
1682 	struct sk_buff *tail;
1683 	unsigned int qlen;
1684 } __qdf_nbuf_queue_t;
1685 
1686 /******************Functions *************/
1687 
1688 /**
1689  * __qdf_nbuf_queue_init() - initialize the queue head
1690  * @qhead: Queue head
1691  *
1692  * Return: QDF status
1693  */
1694 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1695 {
1696 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1697 	return QDF_STATUS_SUCCESS;
1698 }
1699 
1700 /**
1701  * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
1702  * @qhead: Queue head
1703  * @skb: Pointer to network buffer
1704  *
1705  * This is a lockless version, driver must acquire locks if it
1706  * needs to synchronize
1707  *
1708  * Return: none
1709  */
1710 static inline void
1711 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1712 {
	skb->next = NULL;	/* Nullify the next ptr */
1714 
1715 	if (!qhead->head)
1716 		qhead->head = skb;
1717 	else
1718 		qhead->tail->next = skb;
1719 
1720 	qhead->tail = skb;
1721 	qhead->qlen++;
1722 }
1723 
1724 /**
1725  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1726  * @dest: target netbuf queue
1727  * @src:  source netbuf queue
1728  *
1729  * Return: target netbuf queue
1730  */
1731 static inline __qdf_nbuf_queue_t *
1732 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1733 {
1734 	if (!dest)
1735 		return NULL;
1736 	else if (!src || !(src->head))
1737 		return dest;
1738 
1739 	if (!(dest->head))
1740 		dest->head = src->head;
1741 	else
1742 		dest->tail->next = src->head;
1743 
1744 	dest->tail = src->tail;
1745 	dest->qlen += src->qlen;
1746 	return dest;
1747 }
1748 
1749 /**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1751  * @qhead: Queue head
1752  * @skb: Pointer to network buffer
1753  *
1754  * This is a lockless version, driver must acquire locks if it needs to
1755  * synchronize
1756  *
1757  * Return: none
1758  */
1759 static inline void
1760 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1761 {
1762 	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
1764 		qhead->tail = skb;
1765 	}
1766 	skb->next = qhead->head;
1767 	qhead->head = skb;
1768 	qhead->qlen++;
1769 }
1770 
1771 /**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1773  * @qhead: Queue head
1774  *
1775  * This is a lockless version. Driver should take care of the locks
1776  *
1777  * Return: skb or NULL
1778  */
1779 static inline
1780 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1781 {
1782 	__qdf_nbuf_t tmp = NULL;
1783 
1784 	if (qhead->head) {
1785 		qhead->qlen--;
1786 		tmp = qhead->head;
1787 		if (qhead->head == qhead->tail) {
1788 			qhead->head = NULL;
1789 			qhead->tail = NULL;
1790 		} else {
1791 			qhead->head = tmp->next;
1792 		}
1793 		tmp->next = NULL;
1794 	}
1795 	return tmp;
1796 }
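
/*
 * Example (illustrative sketch, not part of this header): building and
 * draining a lockless nbuf queue. Callers must provide their own
 * locking if the queue is shared across contexts; skb1/skb2 stand for
 * previously allocated buffers.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		dev_kfree_skb_any(skb);
 */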
1797 
1798 /**
1799  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1800  * @qhead: head of queue
1801  *
1802  * Return: NULL if the queue is empty
1803  */
1804 static inline struct sk_buff *
1805 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1806 {
1807 	return qhead->head;
1808 }
1809 
1810 /**
1811  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1812  * @qhead: head of queue
1813  *
1814  * Return: NULL if the queue is empty
1815  */
1816 static inline struct sk_buff *
1817 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1818 {
1819 	return qhead->tail;
1820 }
1821 
1822 /**
1823  * __qdf_nbuf_queue_len() - return the queue length
1824  * @qhead: Queue head
1825  *
1826  * Return: Queue length
1827  */
1828 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1829 {
1830 	return qhead->qlen;
1831 }
1832 
1833 /**
1834  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1835  * @skb: Pointer to network buffer
1836  *
1837  * This API returns the next skb from packet chain, remember the skb is
1838  * still in the queue
1839  *
1840  * Return: NULL if no packets are there
1841  */
1842 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1843 {
1844 	return skb->next;
1845 }
1846 
1847 /**
1848  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1849  * @qhead: Queue head
1850  *
1851  * Return: true if length is 0 else false
1852  */
1853 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1854 {
1855 	return qhead->qlen == 0;
1856 }
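
/*
 * Example (illustrative sketch, not part of this header): inspecting the
 * queue in place; the packets remain queued while being walked.
 * inspect() is a hypothetical per-packet handler.
 *
 *	struct sk_buff *skb;
 *
 *	for (skb = __qdf_nbuf_queue_first(&q); skb;
 *	     skb = __qdf_nbuf_queue_next(skb))
 *		inspect(skb);
 */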
1857 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */
1869 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: Pointer to network buffer
 * @flag: flag value
 *
 * This is a no-op in the Linux implementation.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1874 
/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping
 *        the skb shell intact
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Expands the headroom in the data region. In case of failure the
 * skb is released.
 *
 * Return: skb or NULL
 */
1884 static inline struct sk_buff *
1885 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1886 {
1887 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1888 		dev_kfree_skb_any(skb);
1889 		skb = NULL;
1890 	}
1891 	return skb;
1892 }
1893 
/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping
 *        the skb shell intact
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Expands the tailroom in the data region. In case of failure it
 * releases the skb.
 *
 * Return: skb or NULL
 */
1903 static inline struct sk_buff *
1904 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1905 {
1906 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1907 		return skb;
	/* unlikely path */
1911 	dev_kfree_skb_any(skb);
1912 	return NULL;
1913 }
1914 
/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it so that the
 * data is held in a single contiguous buffer.
 *
 * Return: 0 on success, -ENOMEM if the skb could not be linearized.
 */
1927 static inline int
1928 __qdf_nbuf_linearize(struct sk_buff *skb)
1929 {
1930 	return skb_linearize(skb);
1931 }
1932 
/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Creates a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data; if the buffer is not a
 * clone, the original buffer is returned.
 *
 * Return: skb or NULL
 */
1946 __qdf_nbuf_unshare(struct sk_buff *skb)
1947 {
1948 	struct sk_buff *skb_new;
1949 
1950 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1951 
1952 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1953 	if (skb_new)
1954 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1955 
1956 	return skb_new;
1957 }
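
/*
 * Example (illustrative sketch, not part of this header): making a
 * possibly-cloned buffer safe to modify before editing its headers.
 * Note that skb_unshare() frees the original clone reference on
 * allocation failure, so the caller must not touch the old pointer.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	// skb->data may now be modified without affecting other users
 */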
1958 
/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
1965 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1966 {
1967 	return skb_cloned(skb);
1968 }
1969 
1970 /**
1971  * __qdf_nbuf_pool_init() - init pool
1972  * @net: net handle
1973  *
1974  * Return: QDF status
1975  */
1976 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1977 {
1978 	return QDF_STATUS_SUCCESS;
1979 }
1980 
/*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
1984 #define __qdf_nbuf_pool_delete(osdev)
1985 
1986 /**
1987  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1988  *        release the skb.
1989  * @skb: sk buff
1990  * @headroom: size of headroom
1991  * @tailroom: size of tailroom
1992  *
1993  * Return: skb or NULL
1994  */
1995 static inline struct sk_buff *
1996 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1997 {
1998 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1999 		return skb;
2000 
2001 	dev_kfree_skb_any(skb);
2002 	return NULL;
2003 }
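
/*
 * Example (illustrative sketch, not part of this header): guaranteeing
 * room for an extra header and trailer before editing the frame.
 * HDR_LEN and TRL_LEN are hypothetical constants.
 *
 *	if (skb_headroom(skb) < HDR_LEN || skb_tailroom(skb) < TRL_LEN) {
 *		skb = __qdf_nbuf_expand(skb, HDR_LEN, TRL_LEN);
 *		if (!skb)
 *			return QDF_STATUS_E_NOMEM;	// skb already freed
 *	}
 *	skb_push(skb, HDR_LEN);
 */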
2004 
2005 /**
2006  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2007  * @buf: Network buf instance
2008  * @headroom: Additional headroom to be added
2009  * @tailroom: Additional tailroom to be added
2010  *
2011  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2012  *	or NULL if there is no memory
2013  */
2014 static inline struct sk_buff *
2015 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2016 {
2017 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2018 }
2019 
2020 /**
2021  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2022  * @buf: Network buf instance
2023  *
2024  * Return: True, if buf has frag_list else return False
2025  */
2026 static inline bool
2027 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2028 {
2029 	return skb_has_frag_list(buf);
2030 }
2031 
2032 /**
2033  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2034  * @buf: Network buf instance
2035  *
2036  * Return: Network buf instance
2037  */
2038 static inline struct sk_buff *
2039 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2040 {
2041 	struct sk_buff *list;
2042 
2043 	if (!__qdf_nbuf_has_fraglist(buf))
2044 		return NULL;
2045 
2046 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2047 		;
2048 
2049 	return list;
2050 }
2051 
2052 /**
2053  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2054  * @buf: Network buf instance
2055  *
2056  * Return: void
2057  */
2058 static inline void
2059 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2060 {
2061 	struct sk_buff *list;
2062 
2063 	skb_walk_frags(buf, list)
2064 		skb_get(list);
2065 }
2066 
/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to the checksum header offset
 * @where: pointer to the checksum location
 *
 * Not supported in this implementation; asserts if called.
 *
 * Return: true/false
 */
2072 static inline bool
2073 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2074 			 uint8_t **where)
2075 {
2076 	qdf_assert(0);
2077 	return false;
2078 }
2079 
2080 /**
2081  * __qdf_nbuf_reset_ctxt() - mem zero control block
2082  * @nbuf: buffer
2083  *
2084  * Return: none
2085  */
2086 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2087 {
2088 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2089 }
2090 
2091 /**
2092  * __qdf_nbuf_network_header() - get network header
2093  * @buf: buffer
2094  *
2095  * Return: network header pointer
2096  */
2097 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2098 {
2099 	return skb_network_header(buf);
2100 }
2101 
2102 /**
2103  * __qdf_nbuf_transport_header() - get transport header
2104  * @buf: buffer
2105  *
2106  * Return: transport header pointer
2107  */
2108 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2109 {
2110 	return skb_transport_header(buf);
2111 }
2112 
/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS),
 * passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
2121 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2122 {
2123 	return skb_shinfo(skb)->gso_size;
2124 }
2125 
2126 /**
2127  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2128  * @nbuf: sk buff
2129  *
2130  * Return: none
2131  */
2132 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2133 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
2140 static inline void *
2141 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2142 {
2143 	return (void *)nbuf->cb;
2144 }
2145 
2146 /**
2147  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2148  * @skb: sk buff
2149  *
2150  * Return: head size
2151  */
2152 static inline size_t
2153 __qdf_nbuf_headlen(struct sk_buff *skb)
2154 {
2155 	return skb_headlen(skb);
2156 }
2157 
/**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
 * @skb: sk buff
 *
 * Return: true/false
 */
2164 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2165 {
2166 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2167 }
2168 
/**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
 * @skb: sk buff
 *
 * Return: true/false
 */
2175 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2176 {
2177 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2178 }
2179 
2180 /**
2181  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2182  * @skb: sk buff
2183  *
2184  * Return: size of l2+l3+l4 header length
2185  */
2186 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2187 {
2188 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2189 }
2190 
2191 /**
2192  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2193  * @skb: sk buff
2194  *
2195  * Return: size of TCP header length
2196  */
2197 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2198 {
2199 	return tcp_hdrlen(skb);
2200 }
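
/*
 * Example (illustrative sketch, not part of this header): deriving the
 * per-segment layout of a TSO packet from the metadata the stack
 * attaches to the skb. Each transmitted segment carries hdr_len bytes
 * of L2+L3+L4 headers followed by at most mss bytes of TCP payload.
 *
 *	size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
 *	size_t mss = __qdf_nbuf_tcp_tso_size(skb);
 */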
2201 
/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb);
}
2215 
/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
2222 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2223 {
2224 	return ntohl(tcp_hdr(skb)->seq);
2225 }
2226 
/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
2233 static inline uint8_t *
2234 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2235 {
2236 	return &skb->cb[8];
2237 }
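
/*
 * Example (illustrative sketch, not part of this header): overlaying a
 * driver-private structure on the space returned above. skb->cb is
 * 48 bytes and the priv pointer starts at offset 8, so a hypothetical
 * priv struct must fit in the remaining 40 bytes.
 *
 *	struct my_priv {
 *		uint32_t flow_id;
 *		uint8_t tid;
 *	};
 *
 *	struct my_priv *priv =
 *		(struct my_priv *)__qdf_nbuf_get_priv_ptr(skb);
 *	priv->flow_id = 0;
 */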
2238 
2239 /**
2240  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2241  * @buf: Pointer to nbuf
2242  *
2243  * Return: None
2244  */
2245 static inline void
2246 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2247 {
2248 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2249 }
2250 
/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
2259 static inline void
2260 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2261 {
2262 	skb_record_rx_queue(skb, queue_id);
2263 }
2264 
/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by the linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
2272 static inline uint16_t
2273 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2274 {
2275 	return skb->queue_mapping;
2276 }
2277 
/**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
 */
2285 static inline void
2286 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2287 {
2288 	skb_set_queue_mapping(skb, val);
2289 }
2290 
/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
2298 static inline void
2299 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2300 {
2301 	__net_timestamp(skb);
2302 }
2303 
/**
 * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in ms
 */
2311 static inline uint64_t
2312 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2313 {
2314 	return ktime_to_ms(skb_get_ktime(skb));
2315 }
2316 
/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
2324 static inline uint64_t
2325 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2326 {
2327 	return ktime_to_ms(net_timedelta(skb->tstamp));
2328 }
2329 
/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
 */
2337 static inline uint64_t
2338 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2339 {
2340 	return ktime_to_us(net_timedelta(skb->tstamp));
2341 }
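
/*
 * Example (illustrative sketch, not part of this header): stamping a
 * frame on receive and reading the elapsed time when it is finally
 * delivered.
 *
 *	__qdf_nbuf_set_timestamp(skb);			// at Rx entry
 *	...
 *	latency_ms = __qdf_nbuf_get_timedelta_ms(skb);	// at delivery
 */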
2342 
2343 /**
2344  * __qdf_nbuf_orphan() - orphan a nbuf
2345  * @skb: sk buff
2346  *
2347  * If a buffer currently has an owner then we call the
2348  * owner's destructor function
2349  *
2350  * Return: void
2351  */
2352 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2353 {
	skb_orphan(skb);
2355 }
2356 
2357 /**
2358  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2359  * head pointer to end pointer
2360  * @nbuf: qdf_nbuf_t
2361  *
2362  * Return: size of network buffer from head pointer to end
2363  * pointer
2364  */
2365 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2366 {
2367 	return skb_end_offset(nbuf);
2368 }
2369 
2370 /**
2371  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2372  * including the header and variable data area
2373  * @skb: sk buff
2374  *
2375  * Return: size of network buffer
2376  */
2377 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2378 {
2379 	return skb->truesize;
2380 }
2381 
2382 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2383 /**
2384  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2385  * from the total skb mem and DP tx/rx skb mem
2386  * @nbytes: number of bytes
2387  * @dir: direction
2388  * @is_mapped: is mapped or unmapped memory
2389  *
2390  * Return: none
2391  */
2392 static inline void __qdf_record_nbuf_nbytes(
2393 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2394 {
2395 	if (is_mapped) {
2396 		if (dir == QDF_DMA_TO_DEVICE) {
2397 			qdf_mem_dp_tx_skb_cnt_inc();
2398 			qdf_mem_dp_tx_skb_inc(nbytes);
2399 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2400 			qdf_mem_dp_rx_skb_cnt_inc();
2401 			qdf_mem_dp_rx_skb_inc(nbytes);
2402 		}
2403 		qdf_mem_skb_total_inc(nbytes);
2404 	} else {
2405 		if (dir == QDF_DMA_TO_DEVICE) {
2406 			qdf_mem_dp_tx_skb_cnt_dec();
2407 			qdf_mem_dp_tx_skb_dec(nbytes);
2408 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2409 			qdf_mem_dp_rx_skb_cnt_dec();
2410 			qdf_mem_dp_rx_skb_dec(nbytes);
2411 		}
2412 		qdf_mem_skb_total_dec(nbytes);
2413 	}
2414 }
2415 
2416 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2417 static inline void __qdf_record_nbuf_nbytes(
2418 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2419 {
2420 }
2421 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2422 
2423 /**
2424  * __qdf_nbuf_map_nbytes_single() - map nbytes
2425  * @osdev: os device
2426  * @buf: buffer
2427  * @dir: direction
2428  * @nbytes: number of bytes
2429  *
2430  * Return: QDF_STATUS
2431  */
2432 #ifdef A_SIMOS_DEVHOST
2433 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2434 		qdf_device_t osdev, struct sk_buff *buf,
2435 		qdf_dma_dir_t dir, int nbytes)
2436 {
2437 	qdf_dma_addr_t paddr;
2438 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
2440 	return QDF_STATUS_SUCCESS;
2441 }
2442 #else
2443 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2444 		qdf_device_t osdev, struct sk_buff *buf,
2445 		qdf_dma_dir_t dir, int nbytes)
2446 {
2447 	qdf_dma_addr_t paddr;
2448 	QDF_STATUS ret;
2449 
2450 	/* assume that the OS only provides a single fragment */
2451 	QDF_NBUF_CB_PADDR(buf) = paddr =
2452 		dma_map_single(osdev->dev, buf->data,
2453 			       nbytes, __qdf_dma_dir_to_os(dir));
2454 	ret =  dma_mapping_error(osdev->dev, paddr) ?
2455 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2456 	if (QDF_IS_STATUS_SUCCESS(ret))
2457 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2458 					 dir, true);
2459 	return ret;
2460 }
2461 #endif
2462 /**
2463  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2464  * @osdev: os device
2465  * @buf: buffer
2466  * @dir: direction
2467  * @nbytes: number of bytes
2468  *
2469  * Return: none
2470  */
2471 #if defined(A_SIMOS_DEVHOST)
2472 static inline void
2473 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2474 			       qdf_dma_dir_t dir, int nbytes)
2475 {
2476 }
2477 
2478 #else
2479 static inline void
2480 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2481 			       qdf_dma_dir_t dir, int nbytes)
2482 {
2483 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2484 
	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
2492 }
2493 #endif
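
/*
 * Example (illustrative sketch, not part of this header): pairing the
 * map and unmap helpers around a Tx DMA operation. Error handling is
 * minimal and osdev is assumed to be a valid qdf_device_t.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *
 *	// ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */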
2494 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list
 *
 * Return: skb, or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of skbs in the list
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the list
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list
 * @skb_queue_head: skb list
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_purge() - purge the skb list, freeing every skb
 * @skb_queue_head: skb list
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
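
/*
 * Example (illustrative sketch, not part of this header): the
 * sk_buff_head-backed queue serializes enqueue and dequeue internally,
 * so no explicit lock calls are needed for this pattern; skb1 stands
 * for a previously allocated buffer.
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_head_init(&q);
 *	__qdf_nbuf_queue_head_enqueue_tail(&q, skb1);
 *
 *	while ((skb = __qdf_nbuf_queue_head_dequeue(&q)))
 *		dev_kfree_skb_any(skb);
 */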
2525 
/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
 *
 * Return: void
 */
2532 static inline
2533 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2534 {
2535 	spin_lock_bh(&skb_queue_head->lock);
2536 }
2537 
/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
 *
 * Return: void
 */
2544 static inline
2545 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2546 {
2547 	spin_unlock_bh(&skb_queue_head->lock);
2548 }
2549 
2550 /**
2551  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2552  * @nbuf: qdf_nbuf_t
2553  * @idx: Index for which frag size is requested
2554  *
2555  * Return: Frag size
2556  */
2557 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2558 							   uint8_t idx)
2559 {
2560 	unsigned int size = 0;
2561 
2562 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2563 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2564 	return size;
2565 }
2566 
/**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
 */
2574 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2575 						    uint8_t idx)
2576 {
2577 	__qdf_frag_t frag_addr = NULL;
2578 
2579 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2580 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2581 	return frag_addr;
2582 }
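
/*
 * Example (illustrative sketch, not part of this header): iterating the
 * page fragments of an nbuf; __qdf_nbuf_get_nr_frags() is provided
 * earlier in this header.
 *
 *	uint8_t i;
 *	uint8_t nr = __qdf_nbuf_get_nr_frags(nbuf);
 *
 *	for (i = 0; i < nr; i++) {
 *		__qdf_frag_t addr = __qdf_nbuf_get_frag_addr(nbuf, i);
 *		unsigned int len = __qdf_nbuf_get_frag_size_by_idx(nbuf, i);
 *		// process addr/len
 *	}
 */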
2583 
/**
 * __qdf_nbuf_trim_add_frag_size() - Increase/decrease frag_size by size
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @size: Size by which frag_size needs to be increased/decreased
 *        +ve means increase, -ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2592 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2593 						 int size,
2594 						 unsigned int truesize)
2595 {
2596 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2597 }
2598 
2599 /**
2600  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2601  *          and adjust length by size.
2602  * @nbuf: qdf_nbuf_t
2603  * @idx: Frag index
2604  * @offset: Frag page offset should be moved by offset.
2605  *      +Ve - Move offset forward.
2606  *      -Ve - Move offset backward.
2607  *
2608  * Return: QDF_STATUS
2609  */
2610 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2611 					    int offset);
2612 
2613 /**
2614  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
2615  * @nbuf: nbuf pointer
2616  * @idx: frag idx need to be removed
2617  * @truesize: truesize of frag
2618  *
 * Return: void
2620  */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

2622 /**
2623  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
2624  * @buf: Frag pointer needs to be added in nbuf frag
2625  * @nbuf: qdf_nbuf_t where frag will be added
2626  * @offset: Offset in frag to be added to nbuf_frags
2627  * @frag_len: Frag length
2628  * @truesize: truesize
2629  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per the below condition:
2631  *      1. False: If this frag is being added in any nbuf
2632  *              for the first time after allocation.
2633  *      2. True: If frag is already attached part of any
2634  *              nbuf.
2635  *
2636  * It takes ref_count based on boolean flag take_frag_ref
2637  */
2638 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2639 			    int offset, int frag_len,
2640 			    unsigned int truesize, bool take_frag_ref);
2641 
/**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: frag pointer whose reference count is to be incremented
 *
 * Return: void
 */
2647 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2648 
2649 /**
2650  * __qdf_nbuf_set_mark() - Set nbuf mark
2651  * @buf: Pointer to nbuf
2652  * @mark: Value to set mark
2653  *
2654  * Return: None
2655  */
2656 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2657 {
2658 	buf->mark = mark;
2659 }
2660 
2661 /**
2662  * __qdf_nbuf_get_mark() - Get nbuf mark
2663  * @buf: Pointer to nbuf
2664  *
2665  * Return: Value of mark
2666  */
2667 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2668 {
2669 	return buf->mark;
2670 }
2671 
2672 /**
2673  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2674  * the data pointer to the end pointer
2675  * @nbuf: qdf_nbuf_t
2676  *
2677  * Return: size of skb from data pointer to end pointer
2678  */
2679 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2680 {
2681 	return (skb_end_pointer(nbuf) - nbuf->data);
2682 }
2683 
2684 /**
2685  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2686  * @skb: Pointer to network buffer
2687  *
2688  * Return: Return the number of gso segments
2689  */
2690 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2691 {
2692 	return skb_shinfo(skb)->gso_segs;
2693 }
2694 
/**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
 */
2701 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2702 {
2703 	return skb_shinfo(skb)->gso_size;
2704 }
2705 
/**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso size to set
 *
 * Return: none
 */
2712 static inline void
2713 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
2714 {
2715 	skb_shinfo(skb)->gso_size = val;
2716 }
2717 
/**
 * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases
 *
 * Return: None
 */
2726 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
2727 {
2728 	kfree_skb(skb);
2729 }
2730 
/**
 * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases
 *
 * Return: None
 */
2739 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
2740 {
2741 	dev_kfree_skb(skb);
2742 }
2743 
/**
 * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is mcast
 *         FALSE if not
 */
2751 static inline
2752 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
2753 {
2754 	return skb->pkt_type == PACKET_MULTICAST;
2755 }
2756 
/**
 * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is bcast
 *         FALSE if not
 */
2764 static inline
2765 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
2766 {
2767 	return skb->pkt_type == PACKET_BROADCAST;
2768 }
2769 
/**
 * __qdf_nbuf_set_dev() - set dev in network buffer
 * @skb: Pointer to network buffer
 * @dev: netdev to be set in the network buffer
 *
 * Return: void
 */
2777 static inline
2778 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
2779 {
2780 	skb->dev = dev;
2781 }
2782 
/**
 * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev mtu value in nbuf
 */
2789 static inline
2790 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
2791 {
2792 	return skb->dev->mtu;
2793 }
2794 
/**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using the
 * eth_type_trans OS API
 * @skb: Pointer to network buffer
 *
 * Return: None
 */
2801 static inline
2802 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
2803 {
2804 	skb->protocol = eth_type_trans(skb, skb->dev);
2805 }
2806 
/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as qdf_ktime_t object
 *
 * Return: time delta as qdf_ktime_t object
 */
2813 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
2814 {
2815 	return net_timedelta(t);
2816 }
2817 
2818 #ifdef CONFIG_NBUF_AP_PLATFORM
2819 #include <i_qdf_nbuf_w.h>
2820 #else
2821 #include <i_qdf_nbuf_m.h>
2822 #endif
#endif /* _I_QDF_NBUF_H */
2824