/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf API's.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of the nbuf. Linux uses sk_buff to represent both the packet and its
 * data, so sk_buff serves both roles here.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/* TCP Related MASK */
#define QDF_NBUF_PKT_TCPOP_FIN			0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
#define QDF_NBUF_PKT_TCPOP_RST			0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
98 
99 /**
100  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
101  *                    - data passed between layers of the driver.
102  *
103  * Notes:
104  *   1. Hard limited to 48 bytes. Please count your bytes
105  *   2. The size of this structure has to be easily calculatable and
106  *      consistently so: do not use any conditional compile flags
107  *   3. Split into a common part followed by a tx/rx overlay
108  *   4. There is only one extra frag, which represents the HTC/HTT header
109  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
110  *      for the priv_cb_w since it must be at same offset for both
111  *      TX and RX union
112  *   6. "ipa.owned" bit must be first member in both TX and RX unions
113  *      for the priv_cb_m since it must be at same offset for both
114  *      TX and RX union.
115  *
116  * @paddr   : physical addressed retrieved by dma_map of nbuf->data
117  *
118  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
119  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
120  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
121  * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
122  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
123  * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
124  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
125  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
126  *
127  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
128  * @rx.dev.priv_cb_m.flush_ind: flush indication
129  * @rx.dev.priv_cb_m.packet_buf_pool:  packet buff bool
130  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
131  * @rx.dev.priv_cb_m.exc_frm: exception frame
132  * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
133  * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
134 					     sw execption bit from ring desc
135  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
136  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
137  * @rx.dev.priv_cb_m.lro_ctx: LRO context
138  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
139  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
140  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
141  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
142  *
143  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
144  * @rx.tcp_proto: L4 protocol is TCP
145  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
146  * @rx.ipv6_proto: L3 protocol is IPV6
147  * @rx.ip_offset: offset to IP header
148  * @rx.tcp_offset: offset to TCP header
149  * @rx_ctx_id: Rx context id
150  * @num_elements_in_list: number of elements in the nbuf list
151  *
152  * @rx.tcp_udp_chksum: L4 payload checksum
153  * @rx.tcp_wim: TCP window size
154  *
155  * @rx.flow_id: 32bit flow id
156  *
157  * @rx.flag_chfrag_start: first MSDU in an AMSDU
158  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
159  * @rx.flag_chfrag_end: last MSDU in an AMSDU
160  * @rx.flag_retry: flag to indicate MSDU is retried
161  * @rx.flag_da_mcbc: flag to indicate mulicast or broadcast packets
162  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
163  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
164  * @rx.flag_is_frag: flag to indicate skb has frag list
165  * @rx.rsrvd: reserved
166  *
167  * @rx.trace: combined structure for DP and protocol trace
168  * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
169  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
170  * @rx.trace.dp_trace: flag (Datapath trace)
171  * @rx.trace.packet_track: RX_DATA packet
172  * @rx.trace.rsrvd: enable packet logging
173  *
174  * @rx.vdev_id: vdev_id for RX pkt
175  * @rx.is_raw_frame: RAW frame
176  * @rx.fcs_err: FCS error
177  * @rx.tid_val: tid value
178  * @rx.reserved: reserved
179  * @rx.ftype: mcast2ucast, TSO, SG, MESH
180  *
181  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
182  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
183  *
184  * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
185  *                 + (1) CE classification enablement bit
186  *                 + (2) packet type (802.3 or Ethernet type II)
187  *                 + (3) packet offset (usually length of HTC/HTT descr)
188  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
189  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
190  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
191  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
192  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
193  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
194  * @tx.dev.priv_cb_m.reserved: reserved
195  *
196  * @tx.ftype: mcast2ucast, TSO, SG, MESH
197  * @tx.vdev_id: vdev (for protocol trace)
198  * @tx.len: length of efrag pointed by the above pointers
199  *
200  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
201  * @tx.flags.bits.num: number of extra frags ( 0 or 1)
202  * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
203  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
204  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
205  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
206  * @tx.flags.bits.flag_ext_header: extended flags
207  * @tx.flags.bits.reserved: reserved
208  * @tx.trace: combined structure for DP and protocol trace
209  * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
210  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
211  * @tx.trace.is_packet_priv:
212  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
213  * @tx.trace.to_fw: Flag to indicate send this packet to FW
214  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
215  *                          + (MGMT_ACTION)] - 4 bits
216  * @tx.trace.dp_trace: flag (Datapath trace)
217  * @tx.trace.is_bcast: flag (Broadcast packet)
218  * @tx.trace.is_mcast: flag (Multicast packet)
219  * @tx.trace.packet_type: flag (Packet type)
220  * @tx.trace.htt2_frm: flag (high-latency path only)
221  * @tx.trace.print: enable packet logging
222  *
223  * @tx.vaddr: virtual address of ~
224  * @tx.paddr: physical/DMA address of ~
225  */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 reserved:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						reserved:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros to qdf_nbuf_cb.
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
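
/*
 * Illustrative sketch (not part of the upstream header): because the cb
 * access macros below expand to plain struct member references, the same
 * macro works for both reads and writes. The helper name is hypothetical.
 *
 *	static inline void example_mark_rx_tcp(struct sk_buff *skb)
 *	{
 *		QDF_NBUF_CB_RX_TCP_PROTO(skb) = 1;	// L-value: "set"
 *		if (QDF_NBUF_CB_RX_TCP_PROTO(skb))	// R-value: "get"
 *			QDF_NBUF_CB_RX_CTX_ID(skb) = 0;
 *	}
 */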

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
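
/*
 * Illustrative sketch (not part of the upstream header): attaching the
 * driver-owned extra fragment (e.g. an HTC/HTT descriptor) to an skb with
 * __qdf_nbuf_frag_push_head(). The descriptor buffer and its DMA address
 * are assumed to exist already; the helper name is hypothetical.
 *
 *	static void example_push_htt_desc(struct sk_buff *skb, void *desc,
 *					  qdf_dma_addr_t desc_paddr,
 *					  uint16_t desc_len)
 *	{
 *		__qdf_nbuf_frag_push_head(skb, desc_len, desc, desc_paddr);
 *		// __qdf_nbuf_get_num_frags(skb) now reports 2:
 *		// the extra fragment plus the skb data itself.
 *	}
 */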

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
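
/*
 * Illustrative sketch (not part of the upstream header): a typical call to
 * __qdf_nbuf_alloc(). Callers normally reach it through a qdf_nbuf_alloc()
 * wrapper that fills in @func and @line; the sizes used here are assumptions
 * for the example.
 *
 *	__qdf_nbuf_t nbuf;
 *
 *	nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0, __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;	// allocation can fail
 */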

__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size);

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the callsite
 *
 * This API allocates an nbuf directly from the kernel rather than from the
 * skb recycler pool, aligns it if needed, and reserves some headroom after
 * the alignment.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we can check whether the caller is in
 * interrupt context and only then use it; in the normal case use
 * GFP_KERNEL. For example, use "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb; if there are frags,
 * it returns the total length.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one.
 * It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS (status of the call); on failure the src skb
 *         is not released
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
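
/*
 * Illustrative sketch (not part of the upstream header): concatenating two
 * buffers and releasing the source afterwards, as the contract above
 * requires. The helper name is hypothetical; error handling is minimal.
 *
 *	static QDF_STATUS example_cat(struct sk_buff *dst, struct sk_buff *src)
 *	{
 *		QDF_STATUS status = __qdf_nbuf_cat(dst, src);
 *
 *		if (QDF_IS_STATUS_SUCCESS(status))
 *			dev_kfree_skb_any(src);	// caller frees src
 *		return status;
 *	}
 */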

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}
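
/*
 * Illustrative sketch (not part of the upstream header): growing and
 * shrinking the data area with the two helpers above. Note that
 * __qdf_nbuf_put_tail() frees the skb and returns NULL when it cannot
 * expand the tailroom, so the skb must not be touched after a NULL return.
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 16);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb already freed
 *	memset(tail, 0, 16);			// fill the appended 16 bytes
 *	__qdf_nbuf_trim_tail(skb, 16);		// and trim them back off
 */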


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
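
/*
 * Illustrative sketch (not part of the upstream header): stashing a driver
 * cookie in dev_scratch. The version guard above makes the accessors safe
 * to call on older kernels, where the setter is a no-op and the getter
 * returns 0. The cookie value is an assumption for the example.
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, 0xcafe);	// store cookie
 *
 *	cookie = __qdf_nbuf_get_dev_scratch(skb);	// 0xcafe, or 0 on
 *							// pre-4.14 kernels
 */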

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
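
/*
 * Illustrative sketch (not part of the upstream header): stitching two
 * extension segments onto a head segment. seg1 and seg2 are assumed to be
 * valid skbs, chained first with __qdf_nbuf_set_next_ext().
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(head, seg1, seg1->len + seg2->len);
 *	// head->len now covers all three segments;
 *	// __qdf_nbuf_get_ext_list(head) returns seg1.
 */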

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * case of monitor mode AMSDU packet reception, where the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				   "SKB tailroom is less than requested length."
				   " tail-room: %u, len: %u, skb->len: %u",
				   skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @nbuf:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & m_len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t -  network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}
1667 
1668 /**
1669  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1670  * @qhead: Queue head
1671  * @skb: Pointer to network buffer
1672  *
1673  * This is a lockless version, driver must acquire locks if it
1674  * needs to synchronize
1675  *
1676  * Return: none
1677  */
1678 static inline void
1679 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1680 {
1681 	skb->next = NULL;       /*Nullify the next ptr */
1682 
1683 	if (!qhead->head)
1684 		qhead->head = skb;
1685 	else
1686 		qhead->tail->next = skb;
1687 
1688 	qhead->tail = skb;
1689 	qhead->qlen++;
1690 }
1691 
1692 /**
1693  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1694  * @dest: target netbuf queue
1695  * @src:  source netbuf queue
1696  *
1697  * Return: target netbuf queue
1698  */
1699 static inline __qdf_nbuf_queue_t *
1700 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1701 {
1702 	if (!dest)
1703 		return NULL;
1704 	else if (!src || !(src->head))
1705 		return dest;
1706 
1707 	if (!(dest->head))
1708 		dest->head = src->head;
1709 	else
1710 		dest->tail->next = src->head;
1711 
1712 	dest->tail = src->tail;
1713 	dest->qlen += src->qlen;
1714 	return dest;
1715 }
1716 
1717 /**
1718  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
1719  * @qhead: Queue head
1720  * @skb: Pointer to network buffer
1721  *
1722  * This is a lockless version, driver must acquire locks if it needs to
1723  * synchronize
1724  *
1725  * Return: none
1726  */
1727 static inline void
1728 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1729 {
1730 	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
1732 		qhead->tail = skb;
1733 	}
1734 	skb->next = qhead->head;
1735 	qhead->head = skb;
1736 	qhead->qlen++;
1737 }
1738 
1739 /**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1741  * @qhead: Queue head
1742  *
1743  * This is a lockless version. Driver should take care of the locks
1744  *
1745  * Return: skb or NULL
1746  */
1747 static inline
1748 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1749 {
1750 	__qdf_nbuf_t tmp = NULL;
1751 
1752 	if (qhead->head) {
1753 		qhead->qlen--;
1754 		tmp = qhead->head;
1755 		if (qhead->head == qhead->tail) {
1756 			qhead->head = NULL;
1757 			qhead->tail = NULL;
1758 		} else {
1759 			qhead->head = tmp->next;
1760 		}
1761 		tmp->next = NULL;
1762 	}
1763 	return tmp;
1764 }
1765 
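/*
 * Example (illustrative sketch): a minimal producer/consumer flow built
 * on the lockless queue helpers above; any locking is the caller's
 * responsibility.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);          (tail enqueue)
 *	skb = __qdf_nbuf_queue_remove(&q);      (head dequeue, NULL if empty)
 */
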
1766 /**
1767  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1768  * @qhead: head of queue
1769  *
1770  * Return: NULL if the queue is empty
1771  */
1772 static inline struct sk_buff *
1773 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1774 {
1775 	return qhead->head;
1776 }
1777 
1778 /**
1779  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1780  * @qhead: head of queue
1781  *
1782  * Return: NULL if the queue is empty
1783  */
1784 static inline struct sk_buff *
1785 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1786 {
1787 	return qhead->tail;
1788 }
1789 
1790 /**
1791  * __qdf_nbuf_queue_len() - return the queue length
1792  * @qhead: Queue head
1793  *
1794  * Return: Queue length
1795  */
1796 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1797 {
1798 	return qhead->qlen;
1799 }
1800 
1801 /**
1802  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1803  * @skb: Pointer to network buffer
1804  *
1805  * This API returns the next skb from packet chain, remember the skb is
1806  * still in the queue
1807  *
1808  * Return: NULL if no packets are there
1809  */
1810 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1811 {
1812 	return skb->next;
1813 }
1814 
1815 /**
1816  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1817  * @qhead: Queue head
1818  *
1819  * Return: true if length is 0 else false
1820  */
1821 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1822 {
1823 	return qhead->qlen == 0;
1824 }
1825 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */
1837 
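/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag
 * @skb: Pointer to network buffer
 * @flag: flag value
 *
 * No-op in the Linux implementation; both arguments are ignored.
 *
 * Return: none
 */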
1838 static inline void
1839 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1840 {
1841 }
1842 
1843 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom of an skb,
 *        keeping the skb shell intact. In case of failure the skb
 *        is released.
1847  * @skb: sk buff
1848  * @headroom: size of headroom
1849  *
1850  * Return: skb or NULL
1851  */
1852 static inline struct sk_buff *
1853 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1854 {
1855 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1856 		dev_kfree_skb_any(skb);
1857 		skb = NULL;
1858 	}
1859 	return skb;
1860 }
1861 
1862 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom of an skb,
 *        keeping the skb shell intact. In case of failure the skb
 *        is released.
1866  * @skb: sk buff
1867  * @tailroom: size of tailroom
1868  *
1869  * Return: skb or NULL
1870  */
1871 static inline struct sk_buff *
1872 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1873 {
1874 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1875 		return skb;
	/* unlikely path */
1879 	dev_kfree_skb_any(skb);
1880 	return NULL;
1881 }
1882 
1883 /**
1884  * __qdf_nbuf_linearize() - skb linearize
1885  * @skb: sk buff
1886  *
1887  * create a version of the specified nbuf whose contents
1888  * can be safely modified without affecting other
1889  * users.If the nbuf is non-linear then this function
1890  * linearize. if unable to linearize returns -ENOMEM on
1891  * success 0 is returned
1892  *
1893  * Return: 0 on Success, -ENOMEM on failure is returned.
1894  */
1895 static inline int
1896 __qdf_nbuf_linearize(struct sk_buff *skb)
1897 {
1898 	return skb_linearize(skb);
1899 }
1900 
1901 /**
1902  * __qdf_nbuf_unshare() - skb unshare
1903  * @skb: sk buff
1904  *
1905  * create a version of the specified nbuf whose contents
1906  * can be safely modified without affecting other
 * users. If the nbuf is a clone then this function
1908  * creates a new copy of the data. If the buffer is not
1909  * a clone the original buffer is returned.
1910  *
1911  * Return: skb or NULL
1912  */
1913 static inline struct sk_buff *
1914 __qdf_nbuf_unshare(struct sk_buff *skb)
1915 {
1916 	struct sk_buff *skb_new;
1917 
1918 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1919 
1920 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1921 	if (skb_new)
1922 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1923 
1924 	return skb_new;
1925 }
1926 
1927 /**
1928  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1930  *
1931  * Return: true/false
1932  */
1933 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1934 {
1935 	return skb_cloned(skb);
1936 }
1937 
1938 /**
1939  * __qdf_nbuf_pool_init() - init pool
1940  * @net: net handle
1941  *
1942  * Return: QDF status
1943  */
1944 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1945 {
1946 	return QDF_STATUS_SUCCESS;
1947 }
1948 
1949 /*
 * __qdf_nbuf_pool_delete() implementation - does nothing on Linux
1951  */
1952 #define __qdf_nbuf_pool_delete(osdev)
1953 
1954 /**
1955  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1956  *        release the skb.
1957  * @skb: sk buff
1958  * @headroom: size of headroom
1959  * @tailroom: size of tailroom
1960  *
1961  * Return: skb or NULL
1962  */
1963 static inline struct sk_buff *
1964 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1965 {
1966 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1967 		return skb;
1968 
1969 	dev_kfree_skb_any(skb);
1970 	return NULL;
1971 }
1972 
1973 /**
1974  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1975  * @buf: Network buf instance
1976  * @headroom: Additional headroom to be added
1977  * @tailroom: Additional tailroom to be added
1978  *
1979  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1980  *	or NULL if there is no memory
1981  */
1982 static inline struct sk_buff *
1983 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1984 {
1985 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1986 }
1987 
1988 /**
1989  * __qdf_nbuf_has_fraglist() - check buf has fraglist
1990  * @buf: Network buf instance
1991  *
1992  * Return: True, if buf has frag_list else return False
1993  */
1994 static inline bool
1995 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
1996 {
1997 	return skb_has_frag_list(buf);
1998 }
1999 
2000 /**
2001  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2002  * @buf: Network buf instance
2003  *
2004  * Return: Network buf instance
2005  */
2006 static inline struct sk_buff *
2007 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2008 {
2009 	struct sk_buff *list;
2010 
2011 	if (!__qdf_nbuf_has_fraglist(buf))
2012 		return NULL;
2013 
2014 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2015 		;
2016 
2017 	return list;
2018 }
2019 
2020 /**
2021  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2022  * @buf: Network buf instance
2023  *
2024  * Return: void
2025  */
2026 static inline void
2027 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2028 {
2029 	struct sk_buff *list;
2030 
2031 	skb_walk_frags(buf, list)
2032 		skb_get(list);
2033 }
2034 
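/*
 * Example (illustrative sketch): duplicating ownership of a chained rx
 * buffer with the fraglist helpers above. The variable names are
 * hypothetical.
 *
 *	struct sk_buff *tail;
 *
 *	if (__qdf_nbuf_has_fraglist(parent))
 *		__qdf_nbuf_get_ref_fraglist(parent);
 *	tail = __qdf_nbuf_get_last_frag_list_nbuf(parent);
 */
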
2035 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to checksum header offset (unused)
 * @where: pointer to checksum location (unused)
 *
2038  * Return: true/false
2039  */
2040 static inline bool
2041 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2042 			 uint8_t **where)
2043 {
2044 	qdf_assert(0);
2045 	return false;
2046 }
2047 
2048 /**
2049  * __qdf_nbuf_reset_ctxt() - mem zero control block
2050  * @nbuf: buffer
2051  *
2052  * Return: none
2053  */
2054 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2055 {
2056 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2057 }
2058 
2059 /**
2060  * __qdf_nbuf_network_header() - get network header
2061  * @buf: buffer
2062  *
2063  * Return: network header pointer
2064  */
2065 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2066 {
2067 	return skb_network_header(buf);
2068 }
2069 
2070 /**
2071  * __qdf_nbuf_transport_header() - get transport header
2072  * @buf: buffer
2073  *
2074  * Return: transport header pointer
2075  */
2076 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2077 {
2078 	return skb_transport_header(buf);
2079 }
2080 
2081 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS),
2083  *  passed as part of network buffer by network stack
2084  * @skb: sk buff
2085  *
2086  * Return: TCP MSS size
2087  *
2088  */
2089 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2090 {
2091 	return skb_shinfo(skb)->gso_size;
2092 }
2093 
2094 /**
2095  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2096  * @nbuf: sk buff
2097  *
2098  * Return: none
2099  */
2100 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2101 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2104  * @nbuf: sk buff
2105  *
2106  * Return: void ptr
2107  */
2108 static inline void *
2109 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2110 {
2111 	return (void *)nbuf->cb;
2112 }
2113 
2114 /**
2115  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2116  * @skb: sk buff
2117  *
2118  * Return: head size
2119  */
2120 static inline size_t
2121 __qdf_nbuf_headlen(struct sk_buff *skb)
2122 {
2123 	return skb_headlen(skb);
2124 }
2125 
2126 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
 * @skb: sk buff
2129  *
2130  * Return: true/false
2131  */
2132 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2133 {
2134 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2135 }
2136 
2137 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
 * @skb: sk buff
2140  *
2141  * Return: true/false
2142  */
2143 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2144 {
2145 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2146 }
2147 
2148 /**
2149  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2150  * @skb: sk buff
2151  *
2152  * Return: size of l2+l3+l4 header length
2153  */
2154 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2155 {
2156 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2157 }
2158 
2159 /**
2160  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
2162  *
2163  * Return:  true/false
2164  */
2165 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2166 {
	return skb_is_nonlinear(skb);
2171 }
2172 
2173 /**
2174  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the  skb
2175  * @buf: sk buff
2176  *
2177  * Return: TCP sequence number
2178  */
2179 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2180 {
2181 	return ntohl(tcp_hdr(skb)->seq);
2182 }
2183 
2184 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2187  *
2188  * Return: data pointer to typecast into your priv structure
2189  */
2190 static inline uint8_t *
2191 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2192 {
2193 	return &skb->cb[8];
2194 }
2195 
2196 /**
2197  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2198  * @buf: Pointer to nbuf
2199  *
2200  * Return: None
2201  */
2202 static inline void
2203 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2204 {
2205 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2206 }
2207 
2208 /**
2209  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2210  *
 * @skb: sk buff
2212  * @queue_id: Queue id
2213  *
2214  * Return: void
2215  */
2216 static inline void
2217 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2218 {
2219 	skb_record_rx_queue(skb, queue_id);
2220 }
2221 
2222 /**
2223  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2224  *
 * @skb: sk buff
2226  *
2227  * Return: Queue mapping
2228  */
2229 static inline uint16_t
2230 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2231 {
2232 	return skb->queue_mapping;
2233 }
2234 
2235 /**
2236  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2237  *
 * @skb: sk buff
2239  *
2240  * Return: void
2241  */
2242 static inline void
2243 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2244 {
2245 	__net_timestamp(skb);
2246 }
2247 
2248 /**
2249  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2250  *
 * @skb: sk buff
2252  *
2253  * Return: timestamp stored in skb in ms
2254  */
2255 static inline uint64_t
2256 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2257 {
2258 	return ktime_to_ms(skb_get_ktime(skb));
2259 }
2260 
2261 /**
2262  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2263  *
 * @skb: sk buff
2265  *
2266  * Return: time difference in ms
2267  */
2268 static inline uint64_t
2269 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2270 {
2271 	return ktime_to_ms(net_timedelta(skb->tstamp));
2272 }
2273 
2274 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 *
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2280  */
2281 static inline uint64_t
2282 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2283 {
2284 	return ktime_to_us(net_timedelta(skb->tstamp));
2285 }
2286 
2287 /**
2288  * __qdf_nbuf_orphan() - orphan a nbuf
2289  * @skb: sk buff
2290  *
2291  * If a buffer currently has an owner then we call the
2292  * owner's destructor function
2293  *
2294  * Return: void
2295  */
2296 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2297 {
	skb_orphan(skb);
2299 }
2300 
2301 /**
2302  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2303  * head pointer to end pointer
2304  * @nbuf: qdf_nbuf_t
2305  *
2306  * Return: size of network buffer from head pointer to end
2307  * pointer
2308  */
2309 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2310 {
2311 	return skb_end_offset(nbuf);
2312 }
2313 
2314 /**
2315  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2316  * including the header and variable data area
2317  * @skb: sk buff
2318  *
2319  * Return: size of network buffer
2320  */
2321 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2322 {
2323 	return skb->truesize;
2324 }
2325 
2326 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2327 /**
2328  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2329  * from the total skb mem and DP tx/rx skb mem
2330  * @nbytes: number of bytes
2331  * @dir: direction
2332  * @is_mapped: is mapped or unmapped memory
2333  *
2334  * Return: none
2335  */
2336 static inline void __qdf_record_nbuf_nbytes(
2337 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2338 {
2339 	if (is_mapped) {
2340 		if (dir == QDF_DMA_TO_DEVICE) {
2341 			qdf_mem_dp_tx_skb_cnt_inc();
2342 			qdf_mem_dp_tx_skb_inc(nbytes);
2343 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2344 			qdf_mem_dp_rx_skb_cnt_inc();
2345 			qdf_mem_dp_rx_skb_inc(nbytes);
2346 		}
2347 		qdf_mem_skb_total_inc(nbytes);
2348 	} else {
2349 		if (dir == QDF_DMA_TO_DEVICE) {
2350 			qdf_mem_dp_tx_skb_cnt_dec();
2351 			qdf_mem_dp_tx_skb_dec(nbytes);
2352 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2353 			qdf_mem_dp_rx_skb_cnt_dec();
2354 			qdf_mem_dp_rx_skb_dec(nbytes);
2355 		}
2356 		qdf_mem_skb_total_dec(nbytes);
2357 	}
2358 }
2359 
2360 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2361 static inline void __qdf_record_nbuf_nbytes(
2362 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2363 {
2364 }
2365 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2366 
2367 /**
2368  * __qdf_nbuf_map_nbytes_single() - map nbytes
2369  * @osdev: os device
2370  * @buf: buffer
2371  * @dir: direction
2372  * @nbytes: number of bytes
2373  *
2374  * Return: QDF_STATUS
2375  */
2376 #ifdef A_SIMOS_DEVHOST
2377 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2378 		qdf_device_t osdev, struct sk_buff *buf,
2379 		qdf_dma_dir_t dir, int nbytes)
2380 {
	/* simulation host: use the virtual address as the DMA address */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
2384 	return QDF_STATUS_SUCCESS;
2385 }
2386 #else
2387 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2388 		qdf_device_t osdev, struct sk_buff *buf,
2389 		qdf_dma_dir_t dir, int nbytes)
2390 {
2391 	qdf_dma_addr_t paddr;
2392 	QDF_STATUS ret;
2393 
2394 	/* assume that the OS only provides a single fragment */
2395 	QDF_NBUF_CB_PADDR(buf) = paddr =
2396 		dma_map_single(osdev->dev, buf->data,
2397 			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
2399 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2400 	if (QDF_IS_STATUS_SUCCESS(ret))
2401 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2402 					 dir, true);
2403 	return ret;
2404 }
#endif

2406 /**
2407  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2408  * @osdev: os device
2409  * @buf: buffer
2410  * @dir: direction
2411  * @nbytes: number of bytes
2412  *
2413  * Return: none
2414  */
2415 #if defined(A_SIMOS_DEVHOST)
2416 static inline void
2417 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2418 			       qdf_dma_dir_t dir, int nbytes)
2419 {
2420 }
2421 
2422 #else
2423 static inline void
2424 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2425 			       qdf_dma_dir_t dir, int nbytes)
2426 {
2427 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2428 
2429 	if (qdf_likely(paddr)) {
2430 		__qdf_record_nbuf_nbytes(
2431 			__qdf_nbuf_get_end_offset(buf), dir, false);
2432 		dma_unmap_single(osdev->dev, paddr, nbytes,
2433 				 __qdf_dma_dir_to_os(dir));
2434 		return;
2435 	}
2436 }
2437 #endif
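
/*
 * Example (illustrative sketch): pairing the single-buffer map/unmap
 * helpers around device I/O. Error handling is minimal and the
 * surrounding driver context (osdev, skb, len) is assumed.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	hand QDF_NBUF_CB_PADDR(skb) to the hardware, then:
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE, len);
 */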
2438 
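/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list from which the skb is dequeued
 *
 * Return: skb, or NULL if the list is empty
 */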
2439 static inline struct sk_buff *
2440 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2441 {
2442 	return skb_dequeue(skb_queue_head);
2443 }
2444 
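/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of skbs in the list
 */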
2445 static inline
2446 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2447 {
2448 	return skb_queue_head->qlen;
2449 }
2450 
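/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the list
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 *
 * Return: none
 */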
2451 static inline
2452 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2453 					struct sk_buff *skb)
2454 {
	skb_queue_tail(skb_queue_head, skb);
2456 }
2457 
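/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list and its lock
 * @skb_queue_head: skb list to initialize
 *
 * Return: none
 */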
2458 static inline
2459 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2460 {
	skb_queue_head_init(skb_queue_head);
2462 }
2463 
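/**
 * __qdf_nbuf_queue_head_purge() - free all skbs on the list
 * @skb_queue_head: skb list to purge
 *
 * Return: none
 */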
2464 static inline
2465 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2466 {
	skb_queue_purge(skb_queue_head);
2468 }
2469 
2470 /**
2471  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
2473  *
2474  * Return: void
2475  */
2476 static inline
2477 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2478 {
2479 	spin_lock_bh(&skb_queue_head->lock);
2480 }
2481 
2482 /**
2483  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
2485  *
2486  * Return: void
2487  */
2488 static inline
2489 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2490 {
2491 	spin_unlock_bh(&skb_queue_head->lock);
2492 }
2493 
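/*
 * Example (illustrative sketch): the lock helpers above pair with
 * lockless list manipulation (e.g. the kernel's __skb_* primitives);
 * the locked helpers such as __qdf_nbuf_queue_head_enqueue_tail()
 * take the list lock internally and must not be called with it held.
 *
 *	__qdf_nbuf_queue_head_lock(&list);
 *	__skb_queue_tail(&list, skb);
 *	__qdf_nbuf_queue_head_unlock(&list);
 */
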
2494 /**
2495  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2496  * @nbuf: qdf_nbuf_t
2497  * @idx: Index for which frag size is requested
2498  *
2499  * Return: Frag size
2500  */
2501 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2502 							   uint8_t idx)
2503 {
2504 	unsigned int size = 0;
2505 
2506 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2507 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2508 	return size;
2509 }
2510 
2511 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
2517  */
2518 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2519 						    uint8_t idx)
2520 {
2521 	__qdf_frag_t frag_addr = NULL;
2522 
2523 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2524 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2525 	return frag_addr;
2526 }
2527 
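/*
 * Example (illustrative sketch): walking the page frags of an nbuf with
 * the accessors above.
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < __qdf_nbuf_get_nr_frags(nbuf); i++) {
 *		__qdf_frag_t va = __qdf_nbuf_get_frag_addr(nbuf, i);
 *		unsigned int len = __qdf_nbuf_get_frag_size_by_idx(nbuf, i);
 *
 *		consume va/len;
 *	}
 */
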
2528 /**
2529  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2530  * @nbuf: qdf_nbuf_t
2531  * @idx: Frag index
2532  * @size: Size by which frag_size needs to be increased/decreased
2533  *        +Ve means increase, -Ve means decrease
2534  * @truesize: truesize
2535  */
2536 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2537 						 int size,
2538 						 unsigned int truesize)
2539 {
2540 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2541 }
2542 
2543 /**
2544  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2545  *          and adjust length by size.
2546  * @nbuf: qdf_nbuf_t
2547  * @idx: Frag index
2548  * @offset: Frag page offset should be moved by offset.
2549  *      +Ve - Move offset forward.
2550  *      -Ve - Move offset backward.
2551  *
2552  * Return: QDF_STATUS
2553  */
2554 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2555 					    int offset);
2556 
2557 /**
2558  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
2559  * @buf: Frag pointer needs to be added in nbuf frag
2560  * @nbuf: qdf_nbuf_t where frag will be added
2561  * @offset: Offset in frag to be added to nbuf_frags
2562  * @frag_len: Frag length
2563  * @truesize: truesize
2564  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per the below condition:
 *      1. False: If this frag is being added to an nbuf
 *              for the first time after allocation.
 *      2. True: If the frag is already attached as part of
 *              another nbuf.
2570  *
2571  * It takes ref_count based on boolean flag take_frag_ref
2572  */
2573 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2574 			    int offset, int frag_len,
2575 			    unsigned int truesize, bool take_frag_ref);
2576 
2577 /**
2578  * __qdf_nbuf_ref_frag() - get frag reference
2579  *
2580  * Return: void
2581  */
2582 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2583 
2584 /**
2585  * __qdf_nbuf_set_mark() - Set nbuf mark
2586  * @buf: Pointer to nbuf
2587  * @mark: Value to set mark
2588  *
2589  * Return: None
2590  */
2591 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2592 {
2593 	buf->mark = mark;
2594 }
2595 
2596 /**
2597  * __qdf_nbuf_get_mark() - Get nbuf mark
2598  * @buf: Pointer to nbuf
2599  *
2600  * Return: Value of mark
2601  */
2602 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2603 {
2604 	return buf->mark;
2605 }
2606 
2607 /**
2608  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2609  * the data pointer to the end pointer
2610  * @nbuf: qdf_nbuf_t
2611  *
2612  * Return: size of skb from data pointer to end pointer
2613  */
2614 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2615 {
2616 	return (skb_end_pointer(nbuf) - nbuf->data);
2617 }
2618 
2619 /**
2620  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2621  * @skb: Pointer to network buffer
2622  *
2623  * Return: Return the number of gso segments
2624  */
2625 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2626 {
2627 	return skb_shinfo(skb)->gso_segs;
2628 }
2629 
2630 /*
2631  * __qdf_nbuf_net_timedelta() - get time delta
2632  * @t: time as __qdf_ktime_t object
2633  *
2634  * Return: time delta as ktime_t object
2635  */
2636 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
2637 {
2638 	return net_timedelta(t);
2639 }
2640 
2641 #ifdef CONFIG_NBUF_AP_PLATFORM
2642 #include <i_qdf_nbuf_w.h>
2643 #else
2644 #include <i_qdf_nbuf_m.h>
2645 #endif
#endif /* _I_QDF_NBUF_H */