xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision 4cfc54cf60be58b902e9fd31baa5eac56a9085a7)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf.h
22  * This file provides OS dependent nbuf APIs.
23  */
24 
25 #ifndef _I_QDF_NBUF_H
26 #define _I_QDF_NBUF_H
27 
28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/version.h>
33 #include <asm/cacheflush.h>
34 #include <qdf_types.h>
35 #include <qdf_net_types.h>
36 #include <qdf_status.h>
37 #include <qdf_util.h>
38 #include <qdf_mem.h>
39 #include <linux/tcp.h>
41 #include <qdf_nbuf_frag.h>
42 #include "qdf_time.h"
43 
44 /*
45  * Use the Linux socket buffer (sk_buff) as the underlying implementation
46  * of the nbuf. Linux uses sk_buff to represent both the packet and its
47  * data, so sk_buff serves both purposes here.
48  */
49 typedef struct sk_buff *__qdf_nbuf_t;
50 
51 /**
52  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
53  *
54  * This is used for skb queue management via linux skb buff head APIs
55  */
56 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
57 
58 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
59 
60 #define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
61 
62 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
63  * max tx fragments added by the driver
64  * The driver will always add one tx fragment (the tx descriptor)
65  */
66 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
67 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
68 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
69 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
70 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
71 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
72 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
73 #define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
74 
75 #define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)
76 
77 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
78 #define IEEE80211_RADIOTAP_HE 23
79 #define IEEE80211_RADIOTAP_HE_MU 24
80 #endif
81 
82 #define IEEE80211_RADIOTAP_HE_MU_OTHER 25
83 
84 #define IEEE80211_RADIOTAP_EXT1_USIG	1
85 #define IEEE80211_RADIOTAP_EXT1_EHT	2
86 
87 /* mark the first packet after wow wakeup */
88 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
89 
90 /* TCP Related MASK */
91 #define QDF_NBUF_PKT_TCPOP_FIN			0x01
92 #define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
93 #define QDF_NBUF_PKT_TCPOP_RST			0x04
94 
95 /*
96  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
97  */
98 typedef union {
99 	uint64_t       u64;
100 	qdf_dma_addr_t dma_addr;
101 } qdf_paddr_t;
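
/*
 * Illustrative sketch (not part of the original header): the union above
 * pads qdf_dma_addr_t out to 64 bits, so the cb layout is the same whether
 * the platform uses 32-bit or 64-bit DMA addresses. This can be expressed
 * as a compile-time check:
 */
QDF_COMPILE_TIME_ASSERT(qdf_paddr_size,
			sizeof(qdf_paddr_t) == sizeof(uint64_t));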
102 
103 /**
104  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
105  *                    - data passed between layers of the driver.
106  *
107  * Notes:
108  *   1. Hard limited to 48 bytes. Please count your bytes
109  *   2. The size of this structure has to be easily and consistently
110  *      calculable: do not use any conditional compile flags
111  *   3. Split into a common part followed by a tx/rx overlay
112  *   4. There is only one extra frag, which represents the HTC/HTT header
113  *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
114  *      for the priv_cb_w since it must be at same offset for both
115  *      TX and RX union
116  *   6. "ipa.owned" bit must be first member in both TX and RX unions
117  *      for the priv_cb_m since it must be at same offset for both
118  *      TX and RX union.
119  *
120  * @paddr: physical address retrieved by dma_map of nbuf->data
121  *
122  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
123  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
124  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
125  * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
126  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
127  * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
128  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
129  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
130  *
131  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
132  * @rx.dev.priv_cb_m.flush_ind: flush indication
133  * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
134  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
135  * @rx.dev.priv_cb_m.exc_frm: exception frame
136  * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
137  * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
138  *					sw exception bit from ring desc
139  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
140  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
141  * @rx.dev.priv_cb_m.lro_ctx: LRO context
142  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
143  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
144  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
145  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
146  *
147  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
148  * @rx.tcp_proto: L4 protocol is TCP
149  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
150  * @rx.ipv6_proto: L3 protocol is IPV6
151  * @rx.ip_offset: offset to IP header
152  * @rx.tcp_offset: offset to TCP header
153  * @rx_ctx_id: Rx context id
154  * @num_elements_in_list: number of elements in the nbuf list
155  *
156  * @rx.tcp_udp_chksum: L4 payload checksum
157  * @rx.tcp_win: TCP window size
158  *
159  * @rx.flow_id: 32bit flow id
160  *
161  * @rx.flag_chfrag_start: first MSDU in an AMSDU
162  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
163  * @rx.flag_chfrag_end: last MSDU in an AMSDU
164  * @rx.flag_retry: flag to indicate MSDU is retried
165  * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
166  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
167  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
168  * @rx.flag_is_frag: flag to indicate skb has frag list
169  * @rx.rsrvd: reserved
170  *
171  * @rx.trace: combined structure for DP and protocol trace
172  * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
173  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
174  * @rx.trace.dp_trace: flag (Datapath trace)
175  * @rx.trace.packet_track: RX_DATA packet
176  * @rx.trace.rsrvd: reserved
177  *
178  * @rx.vdev_id: vdev_id for RX pkt
179  * @rx.is_raw_frame: RAW frame
180  * @rx.fcs_err: FCS error
181  * @rx.tid_val: tid value
182  * @rx.reserved: reserved
183  * @rx.ftype: mcast2ucast, TSO, SG, MESH
184  *
185  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
186  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
187  *
188  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
189  *                 + (1) CE classification enablement bit
190  *                 + (2) packet type (802.3 or Ethernet type II)
191  *                 + (3) packet offset (usually length of HTC/HTT descr)
192  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
193  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
194  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
195  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
196  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
197  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
198  * @tx.dev.priv_cb_m.reserved: reserved
199  *
200  * @tx.ftype: mcast2ucast, TSO, SG, MESH
201  * @tx.vdev_id: vdev (for protocol trace)
202  * @tx.len: length of the extra fragment pointed to by tx.vaddr/tx.paddr
203  *
204  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
205  * @tx.flags.bits.num: number of extra frags (0 or 1)
206  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
207  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
208  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
209  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
210  * @tx.flags.bits.flag_ext_header: extended flags
211  * @tx.flags.bits.is_critical: flag indicating a critical frame
212  * @tx.trace: combined structure for DP and protocol trace
213  * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
214  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
215  * @tx.trace.is_packet_priv:
216  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
217  * @tx.trace.to_fw: Flag to indicate send this packet to FW
218  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
219  *                          + (MGMT_ACTION)] - 4 bits
220  * @tx.trace.dp_trace: flag (Datapath trace)
221  * @tx.trace.is_bcast: flag (Broadcast packet)
222  * @tx.trace.is_mcast: flag (Multicast packet)
223  * @tx.trace.packet_type: flag (Packet type)
224  * @tx.trace.htt2_frm: flag (high-latency path only)
225  * @tx.trace.print: enable packet logging
226  *
227  * @tx.vaddr: virtual address of the extra tx fragment
228  * @tx.paddr: physical/DMA address of the extra tx fragment
229  */
230 struct qdf_nbuf_cb {
231 	/* common */
232 	qdf_paddr_t paddr; /* of skb->data */
233 	/* valid only in one direction */
234 	union {
235 		/* Note: MAX: 40 bytes */
236 		struct {
237 			union {
238 				struct {
239 					void *ext_cb_ptr;
240 					void *fctx;
241 					uint16_t msdu_len : 14,
242 						 flag_intra_bss : 1,
243 						 ipa_smmu_map : 1;
244 					uint16_t peer_id;
245 					uint16_t protocol_tag;
246 					uint16_t flow_tag;
247 				} priv_cb_w;
248 				struct {
249 					/* ipa_owned bit is common between rx
250 					 * control block and tx control block.
251 					 * Do not change location of this bit.
252 					 */
253 					uint32_t ipa_owned:1,
254 						 peer_cached_buf_frm:1,
255 						 flush_ind:1,
256 						 packet_buf_pool:1,
257 						 l3_hdr_pad:3,
258 						 /* exception frame flag */
259 						 exc_frm:1,
260 						 ipa_smmu_map:1,
261 						 reo_dest_ind_or_sw_excpt:5,
262 						 reserved:2,
263 						 reserved1:16;
264 					uint32_t tcp_seq_num;
265 					uint32_t tcp_ack_num;
266 					union {
267 						struct {
268 							uint16_t msdu_len;
269 							uint16_t peer_id;
270 						} wifi3;
271 						struct {
272 							uint32_t map_index;
273 						} wifi2;
274 					} dp;
275 					unsigned char *lro_ctx;
276 				} priv_cb_m;
277 			} dev;
278 			uint32_t lro_eligible:1,
279 				tcp_proto:1,
280 				tcp_pure_ack:1,
281 				ipv6_proto:1,
282 				ip_offset:7,
283 				tcp_offset:7,
284 				rx_ctx_id:4,
285 				fcs_err:1,
286 				is_raw_frame:1,
287 				num_elements_in_list:8;
288 			uint32_t tcp_udp_chksum:16,
289 				 tcp_win:16;
290 			uint32_t flow_id;
291 			uint8_t flag_chfrag_start:1,
292 				flag_chfrag_cont:1,
293 				flag_chfrag_end:1,
294 				flag_retry:1,
295 				flag_da_mcbc:1,
296 				flag_da_valid:1,
297 				flag_sa_valid:1,
298 				flag_is_frag:1;
299 			union {
300 				uint8_t packet_state;
301 				uint8_t dp_trace:1,
302 					packet_track:3,
303 					rsrvd:4;
304 			} trace;
305 			uint16_t vdev_id:8,
306 				 tid_val:4,
307 				 ftype:4;
308 		} rx;
309 
310 		/* Note: MAX: 40 bytes */
311 		struct {
312 			union {
313 				struct {
314 					void *ext_cb_ptr;
315 					void *fctx;
316 				} priv_cb_w;
317 				struct {
318 					/* ipa_owned bit is common between rx
319 					 * control block and tx control block.
320 					 * Do not change location of this bit.
321 					 */
322 					struct {
323 						uint32_t owned:1,
324 							priv:31;
325 					} ipa;
326 					uint32_t data_attr;
327 					uint16_t desc_id;
328 					uint16_t mgmt_desc_id;
329 					struct {
330 						uint8_t bi_map:1,
331 							reserved:7;
332 					} dma_option;
333 					uint8_t flag_notify_comp:1,
334 						rsvd:7;
335 					uint8_t reserved[2];
336 				} priv_cb_m;
337 			} dev;
338 			uint8_t ftype;
339 			uint8_t vdev_id;
340 			uint16_t len;
341 			union {
342 				struct {
343 					uint8_t flag_efrag:1,
344 						flag_nbuf:1,
345 						num:1,
346 						flag_chfrag_start:1,
347 						flag_chfrag_cont:1,
348 						flag_chfrag_end:1,
349 						flag_ext_header:1,
350 						is_critical:1;
351 				} bits;
352 				uint8_t u8;
353 			} flags;
354 			struct {
355 				uint8_t packet_state:7,
356 					is_packet_priv:1;
357 				uint8_t packet_track:3,
358 					to_fw:1,
359 					proto_type:4;
360 				uint8_t dp_trace:1,
361 					is_bcast:1,
362 					is_mcast:1,
363 					packet_type:3,
364 					/* used only for hl*/
365 					htt2_frm:1,
366 					print:1;
367 			} trace;
368 			unsigned char *vaddr;
369 			qdf_paddr_t paddr;
370 		} tx;
371 	} u;
372 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
373 
374 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
375 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
376 			(sizeof(struct qdf_nbuf_cb)) <=
377 			sizeof_field(struct sk_buff, cb));
378 #else
379 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
380 			(sizeof(struct qdf_nbuf_cb)) <=
381 			FIELD_SIZEOF(struct sk_buff, cb));
382 #endif
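
/*
 * Illustrative sketch (not part of the original header): notes 5 and 6 in
 * the struct documentation above can likewise be expressed as compile-time
 * checks, e.g. that "ext_cb_ptr" sits at the same offset in the TX and RX
 * unions (offsetof() comes from the kernel's stddef definitions):
 */
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_ext_cb_ptr_offset,
			offsetof(struct qdf_nbuf_cb,
				 u.rx.dev.priv_cb_w.ext_cb_ptr) ==
			offsetof(struct qdf_nbuf_cb,
				 u.tx.dev.priv_cb_w.ext_cb_ptr));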
383 
384 /**
385  *  access macros to qdf_nbuf_cb
386  *  Note: These macros can be used as L-values as well as R-values.
387  *        When used as R-values they act as "get" macros; as L-values they
388  *        act as "set" macros (see the usage sketch after the definitions).
389  */
390 
391 #define QDF_NBUF_CB_PADDR(skb) \
392 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
393 
394 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
395 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
396 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
397 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
398 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
399 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
400 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
401 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
402 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
403 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
404 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
405 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
406 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
407 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
408 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
409 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
410 
411 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
412 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
413 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
414 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
415 
416 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
417 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
418 
419 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
420 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
421 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
422 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
423 
424 #define QDF_NBUF_CB_RX_FTYPE(skb) \
425 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
426 
427 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
428 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
429 
430 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
431 	(((struct qdf_nbuf_cb *) \
432 	((skb)->cb))->u.rx.flag_chfrag_start)
433 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
434 	(((struct qdf_nbuf_cb *) \
435 	((skb)->cb))->u.rx.flag_chfrag_cont)
436 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
437 		(((struct qdf_nbuf_cb *) \
438 		((skb)->cb))->u.rx.flag_chfrag_end)
439 
440 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
441 	(((struct qdf_nbuf_cb *) \
442 	((skb)->cb))->u.rx.flag_da_mcbc)
443 
444 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
445 	(((struct qdf_nbuf_cb *) \
446 	((skb)->cb))->u.rx.flag_da_valid)
447 
448 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
449 	(((struct qdf_nbuf_cb *) \
450 	((skb)->cb))->u.rx.flag_sa_valid)
451 
452 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
453 	(((struct qdf_nbuf_cb *) \
454 	((skb)->cb))->u.rx.flag_retry)
455 
456 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
457 	(((struct qdf_nbuf_cb *) \
458 	((skb)->cb))->u.rx.is_raw_frame)
459 
460 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
461 	(((struct qdf_nbuf_cb *) \
462 	((skb)->cb))->u.rx.tid_val)
463 
464 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
465 	(((struct qdf_nbuf_cb *) \
466 	((skb)->cb))->u.rx.flag_is_frag)
467 
468 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
469 	(((struct qdf_nbuf_cb *) \
470 	((skb)->cb))->u.rx.fcs_err)
471 
472 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
473 	qdf_nbuf_set_state(skb, PACKET_STATE)
474 
475 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
476 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
477 
478 #define QDF_NBUF_CB_TX_FTYPE(skb) \
479 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
480 
481 
482 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
483 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
484 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
485 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
486 
487 /* Tx Flags Accessor Macros*/
488 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
489 	(((struct qdf_nbuf_cb *) \
490 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
491 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
492 	(((struct qdf_nbuf_cb *) \
493 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
494 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
495 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
496 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
497 	(((struct qdf_nbuf_cb *) \
498 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
499 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
500 	(((struct qdf_nbuf_cb *) \
501 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
502 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
503 		(((struct qdf_nbuf_cb *) \
504 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
505 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
506 		(((struct qdf_nbuf_cb *) \
507 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
508 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
509 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
510 
511 #define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
512 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
513 /* End of Tx Flags Accessor Macros */
514 
515 /* Tx trace accessor macros */
516 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
517 	(((struct qdf_nbuf_cb *) \
518 		((skb)->cb))->u.tx.trace.packet_state)
519 
520 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
521 	(((struct qdf_nbuf_cb *) \
522 		((skb)->cb))->u.tx.trace.is_packet_priv)
523 
524 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
525 	(((struct qdf_nbuf_cb *) \
526 		((skb)->cb))->u.tx.trace.packet_track)
527 
528 #define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
529 	(((struct qdf_nbuf_cb *) \
530 		((skb)->cb))->u.tx.trace.to_fw)
531 
532 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
533 		(((struct qdf_nbuf_cb *) \
534 			((skb)->cb))->u.rx.trace.packet_track)
535 
536 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
537 	(((struct qdf_nbuf_cb *) \
538 		((skb)->cb))->u.tx.trace.proto_type)
539 
540 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
541 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
542 
543 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
544 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
545 
546 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
547 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
548 
549 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
550 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
551 
552 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
553 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
554 
555 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
556 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
557 
558 #define QDF_NBUF_CB_SET_BCAST(skb) \
559 	(((struct qdf_nbuf_cb *) \
560 		((skb)->cb))->u.tx.trace.is_bcast = true)
561 
562 #define QDF_NBUF_CB_SET_MCAST(skb) \
563 	(((struct qdf_nbuf_cb *) \
564 		((skb)->cb))->u.tx.trace.is_mcast = true)
565 /* End of Tx trace accessor macros */
566 
567 
568 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
569 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
570 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
571 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
572 
573 /* assume the OS provides a single fragment */
574 #define __qdf_nbuf_get_num_frags(skb)		   \
575 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
576 
577 #define __qdf_nbuf_reset_num_frags(skb) \
578 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
579 
580 /**
581  *   end of nbuf->cb access macros
582  */
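
/*
 * Usage sketch (illustrative only, not part of the driver API): the cb
 * accessor macros above work both as "set" (L-value) and "get" (R-value)
 * forms.
 */
static inline void __qdf_nbuf_cb_usage_sketch(struct sk_buff *skb)
{
	/* "set" usage: the macro is assigned to as an L-value */
	QDF_NBUF_CB_RX_CTX_ID(skb) = 0;

	/* "get" usage: the same style of macro read back as an R-value */
	if (QDF_NBUF_CB_RX_TCP_PROTO(skb))
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) = 1;
}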
583 
584 typedef void (*qdf_nbuf_trace_update_t)(char *);
585 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
586 
587 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
588 
589 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
590 	(QDF_NBUF_CB_PADDR(skb) = paddr)
591 
592 #define __qdf_nbuf_frag_push_head(					\
593 	skb, frag_len, frag_vaddr, frag_paddr)				\
594 	do {					\
595 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
596 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
597 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
598 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
599 	} while (0)
600 
601 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
602 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
603 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
604 
605 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
606 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
607 
608 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
609 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
610 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
611 	 /* assume that the OS only provides a single fragment */	\
612 	 QDF_NBUF_CB_PADDR(skb))
613 
614 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
615 
616 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
617 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
618 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
619 
620 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
621 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
622 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
623 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
624 
625 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
626 	do {								\
627 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
628 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
629 		if (frag_num)						\
630 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
631 							      is_wstrm; \
632 		else					\
633 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
634 							      is_wstrm; \
635 	} while (0)
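
/*
 * Usage sketch (illustrative only): attach a driver-owned descriptor as
 * the single extra tx fragment and query it back. After the push, frag 0
 * is the extra fragment and the skb payload itself becomes frag 1.
 */
static inline void __qdf_nbuf_frag_usage_sketch(struct sk_buff *skb,
						unsigned char *desc_vaddr,
						qdf_dma_addr_t desc_paddr,
						uint16_t desc_len)
{
	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
	qdf_assert(__qdf_nbuf_get_num_frags(skb) == 2);
	qdf_assert(__qdf_nbuf_get_frag_vaddr(skb, 0) == desc_vaddr);
}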
636 
637 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
638 	do { \
639 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
640 	} while (0)
641 
642 #define __qdf_nbuf_get_vdev_ctx(skb) \
643 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
644 
645 #define __qdf_nbuf_set_tx_ftype(skb, type) \
646 	do { \
647 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
648 	} while (0)
649 
650 #define __qdf_nbuf_get_tx_ftype(skb) \
651 		 QDF_NBUF_CB_TX_FTYPE((skb))
652 
653 
654 #define __qdf_nbuf_set_rx_ftype(skb, type) \
655 	do { \
656 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
657 	} while (0)
658 
659 #define __qdf_nbuf_get_rx_ftype(skb) \
660 		 QDF_NBUF_CB_RX_FTYPE((skb))
661 
662 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
663 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
664 
665 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
666 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
667 
668 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
669 	do { \
670 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
671 	} while (0)
672 
673 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
674 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
675 
676 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
677 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
678 
679 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
680 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
681 
682 #define __qdf_nbuf_set_da_mcbc(skb, val) \
683 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
684 
685 #define __qdf_nbuf_is_da_mcbc(skb) \
686 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
687 
688 #define __qdf_nbuf_set_da_valid(skb, val) \
689 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
690 
691 #define __qdf_nbuf_is_da_valid(skb) \
692 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
693 
694 #define __qdf_nbuf_set_sa_valid(skb, val) \
695 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
696 
697 #define __qdf_nbuf_is_sa_valid(skb) \
698 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
699 
700 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
701 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
702 
703 #define __qdf_nbuf_is_rx_retry_flag(skb) \
704 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
705 
706 #define __qdf_nbuf_set_raw_frame(skb, val) \
707 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
708 
709 #define __qdf_nbuf_is_raw_frame(skb) \
710 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
711 
712 #define __qdf_nbuf_get_tid_val(skb) \
713 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
714 
715 #define __qdf_nbuf_set_tid_val(skb, val) \
716 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
717 
718 #define __qdf_nbuf_set_is_frag(skb, val) \
719 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
720 
721 #define __qdf_nbuf_is_frag(skb) \
722 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
723 
724 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
725 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
726 
727 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
728 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
729 
730 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
731 	do { \
732 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
733 	} while (0)
734 
735 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
736 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
737 
738 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
739 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
740 
741 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
742 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
743 
744 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
745 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
746 
747 #define __qdf_nbuf_trace_get_proto_type(skb) \
748 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
749 
750 #define __qdf_nbuf_data_attr_get(skb)		\
751 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
752 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
753 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
754 
755 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
756 		skb_queue_walk_safe(queue, var, tvar)
757 
758 /**
759  * __qdf_nbuf_num_frags_init() - init extra frags
760  * @skb: sk buffer
761  *
762  * Return: none
763  */
764 static inline
765 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
766 {
767 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
768 }
769 
770 /*
771  * prototypes. Implemented in qdf_nbuf.c
772  */
773 
774 /**
775  * __qdf_nbuf_alloc() - Allocate nbuf
776  * @osdev: Device handle
777  * @size: Netbuf requested size
778  * @reserve: headroom to start with
779  * @align: Align
780  * @prio: Priority
781  * @func: Function name of the call site
782  * @line: line number of the call site
783  *
784  * This allocates an nbuf, aligns it if needed, and reserves some space at
785  * the front. Since the reserve is done after alignment, an unaligned
786  * reserve value will result in an unaligned data address.
787  *
788  * Return: nbuf or %NULL if no memory
789  */
790 __qdf_nbuf_t
791 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
792 		 int prio, const char *func, uint32_t line);
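
/*
 * Usage sketch (illustrative only; the sizes are hypothetical): allocate
 * a 2048-byte nbuf with 128 bytes of headroom and 4-byte alignment.
 */
static inline __qdf_nbuf_t
__qdf_nbuf_alloc_usage_sketch(__qdf_device_t osdev)
{
	return __qdf_nbuf_alloc(osdev, 2048, 128, 4, 0,
				__func__, __LINE__);
}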
793 
794 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
795 				     const char *func, uint32_t line);
796 
797 /**
798  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
799  * @size: Size to be allocated for skb
800  * @reserve: Reserve headroom size
801  * @align: Align data
802  * @func: Function name of the call site
803  * @line: Line number of the callsite
804  *
805  * This API allocates an nbuf, aligns it if needed, and reserves some
806  * headroom after the alignment. The nbuf is not taken from the skb
807  * recycler pool.
807  *
808  * Return: Allocated nbuf pointer
809  */
810 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
811 					  const char *func, uint32_t line);
812 
813 /**
814  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
815  * @nbuf: Pointer to network buffer
816  *
817  * If GFP_ATOMIC is overkill, we could check whether this is called
818  * from interrupt context and only then use it, falling back to
819  * GFP_KERNEL in the normal case.
820  *
821  * example: use "in_irq() || irqs_disabled()"
822  *
823  * Return: cloned skb
824  */
825 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
826 
827 void __qdf_nbuf_free(struct sk_buff *skb);
828 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
829 			struct sk_buff *skb, qdf_dma_dir_t dir);
830 void __qdf_nbuf_unmap(__qdf_device_t osdev,
831 			struct sk_buff *skb, qdf_dma_dir_t dir);
832 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
833 				 struct sk_buff *skb, qdf_dma_dir_t dir);
834 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
835 			struct sk_buff *skb, qdf_dma_dir_t dir);
836 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
837 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
838 
839 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
840 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
841 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
842 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
843 	qdf_dma_dir_t dir, int nbytes);
844 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
845 	qdf_dma_dir_t dir, int nbytes);
846 
847 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
848 	qdf_dma_dir_t dir);
849 
850 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
851 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
852 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
853 QDF_STATUS __qdf_nbuf_frag_map(
854 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
855 	int offset, qdf_dma_dir_t dir, int cur_frag);
856 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
857 
858 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
859 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
860 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
861 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
862 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
863 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
864 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
865 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
866 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
867 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
868 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
869 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
870 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
871 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
872 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
873 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
874 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
875 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
876 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
877 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
878 bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
879 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
880 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
881 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
882 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
883 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
884 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
885 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
886 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
887 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
888 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
889 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
890 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
891 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
892 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
893 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
894 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
895 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
896 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
897 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
898 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
899 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
900 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
901 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
902 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
903 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
904 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
905 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
906 
907 #ifdef QDF_NBUF_GLOBAL_COUNT
908 int __qdf_nbuf_count_get(void);
909 void __qdf_nbuf_count_inc(struct sk_buff *skb);
910 void __qdf_nbuf_count_dec(struct sk_buff *skb);
911 void __qdf_nbuf_mod_init(void);
912 void __qdf_nbuf_mod_exit(void);
913 
914 #else
915 
916 static inline int __qdf_nbuf_count_get(void)
917 {
918 	return 0;
919 }
920 
921 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
922 {
923 	return;
924 }
925 
926 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
927 {
928 	return;
929 }
930 
931 static inline void __qdf_nbuf_mod_init(void)
932 {
933 	return;
934 }
935 
936 static inline void __qdf_nbuf_mod_exit(void)
937 {
938 	return;
939 }
940 #endif
941 
942 /**
943  * __qdf_to_status() - OS to QDF status conversion
944  * @error : OS error
945  *
946  * Return: QDF status
947  */
948 static inline QDF_STATUS __qdf_to_status(signed int error)
949 {
950 	switch (error) {
951 	case 0:
952 		return QDF_STATUS_SUCCESS;
953 	case ENOMEM:
954 	case -ENOMEM:
955 		return QDF_STATUS_E_NOMEM;
956 	default:
957 		return QDF_STATUS_E_NOSUPPORT;
958 	}
959 }
960 
961 /**
962  * __qdf_nbuf_len() - return the amount of valid data in the skb
963  * @skb: Pointer to network buffer
964  *
965  * This API returns the amount of valid data in the skb; if there are
966  * extra frags, their length is included in the total.
967  *
968  * Return: network buffer length
969  */
970 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
971 {
972 	int i, extra_frag_len = 0;
973 
974 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
975 	if (i > 0)
976 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
977 
978 	return extra_frag_len + skb->len;
979 }
980 
981 /**
982  * __qdf_nbuf_cat() - link two nbufs
983  * @dst: Buffer to piggyback into
984  * @src: Buffer to put
985  *
986  * Concatenate two nbufs: the new buffer (src) is piggybacked onto the end
987  * of the older one (dst). It is the caller's responsibility to free the
988  * src skb afterwards; this function does not free it.
989  *
990  * Return: QDF_STATUS (status of the call)
991  */
992 static inline QDF_STATUS
993 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
994 {
995 	QDF_STATUS error = 0;
996 
997 	qdf_assert(dst && src);
998 
999 	/*
1000 	 * Since pskb_expand_head unconditionally reallocates the skb->head
1001 	 * buffer, first check whether the current buffer is already large
1002 	 * enough.
1003 	 */
1004 	if (skb_tailroom(dst) < src->len) {
1005 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
1006 		if (error)
1007 			return __qdf_to_status(error);
1008 	}
1009 
1010 	memcpy(skb_tail_pointer(dst), src->data, src->len);
1011 	skb_put(dst, src->len);
1012 	return __qdf_to_status(error);
1013 }
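
/*
 * Usage sketch (illustrative only): concatenate src into dst, then release
 * src, which remains the caller's responsibility in both outcomes.
 */
static inline QDF_STATUS
__qdf_nbuf_cat_usage_sketch(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS status = __qdf_nbuf_cat(dst, src);

	/* src is never freed by __qdf_nbuf_cat(); do it here */
	dev_kfree_skb_any(src);
	return status;
}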
1014 
1015 /*
1016  * nbuf manipulation routines
1017  */
1018 /**
1019  * __qdf_nbuf_headroom() - return the amount of headroom available
1020  * @skb: Pointer to network buffer
1021  *
1022  * Return: amount of head room
1023  */
1024 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
1025 {
1026 	return skb_headroom(skb);
1027 }
1028 
1029 /**
1030  * __qdf_nbuf_tailroom() - return the amount of tail space available
1031  * @skb: Pointer to network buffer
1032  *
1033  * Return: amount of tail room
1034  */
1035 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
1036 {
1037 	return skb_tailroom(skb);
1038 }
1039 
1040 /**
1041  * __qdf_nbuf_put_tail() - Puts data at the end
1042  * @skb: Pointer to network buffer
1043  * @size: size to be pushed
1044  *
1045  * Return: data pointer of this buf where new data has to be
1046  *         put, or NULL if the buffer could not be expanded (skb is freed)
1047  */
1048 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
1049 {
1050 	if (skb_tailroom(skb) < size) {
1051 		if (unlikely(pskb_expand_head(skb, 0,
1052 			size - skb_tailroom(skb), GFP_ATOMIC))) {
1053 			dev_kfree_skb_any(skb);
1054 			return NULL;
1055 		}
1056 	}
1057 	return skb_put(skb, size);
1058 }
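
/*
 * Usage sketch (illustrative only): append a trailer with
 * __qdf_nbuf_put_tail(), handling the NULL return (the expansion failed
 * and the skb has already been freed).
 */
static inline QDF_STATUS
__qdf_nbuf_put_tail_usage_sketch(struct sk_buff *skb,
				 const uint8_t *trailer, size_t len)
{
	uint8_t *tail = __qdf_nbuf_put_tail(skb, len);

	if (!tail)
		return QDF_STATUS_E_NOMEM;
	memcpy(tail, trailer, len);
	return QDF_STATUS_SUCCESS;
}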
1059 
1060 /**
1061  * __qdf_nbuf_trim_tail() - trim data out from the end
1062  * @skb: Pointer to network buffer
1063  * @size: size to be popped
1064  *
1065  * Return: none
1066  */
1067 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
1068 {
1069 	return skb_trim(skb, skb->len - size);
1070 }
1071 
1072 
1073 /*
1074  * prototypes. Implemented in qdf_nbuf.c
1075  */
1076 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
1077 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
1078 				qdf_nbuf_rx_cksum_t *cksum);
1079 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
1080 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
1081 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
1082 void __qdf_nbuf_ref(struct sk_buff *skb);
1083 int __qdf_nbuf_shared(struct sk_buff *skb);
1084 
1085 /**
1086  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1087  * @skb: sk buff
1088  *
1089  * Return: number of fragments
1090  */
1091 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1092 {
1093 	return skb_shinfo(skb)->nr_frags;
1094 }
1095 
1096 /*
1097  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1098  */
1099 #define __qdf_nbuf_pool_delete(osdev)
1100 
1101 /**
1102  * __qdf_nbuf_copy() - returns a private copy of the skb
1103  * @skb: Pointer to network buffer
1104  *
1105  * This API returns a private copy of the skb; the skb returned is
1106  * completely modifiable by callers.
1107  *
1108  * Return: skb or NULL
1109  */
1110 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1111 {
1112 	struct sk_buff *skb_new = NULL;
1113 
1114 	skb_new = skb_copy(skb, GFP_ATOMIC);
1115 	if (skb_new) {
1116 		__qdf_nbuf_count_inc(skb_new);
1117 	}
1118 	return skb_new;
1119 }
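
/*
 * Usage sketch (illustrative only): prefer the cheaper read-only clone
 * when the payload will not be modified; take a private copy otherwise.
 */
static inline struct sk_buff *
__qdf_nbuf_dup_usage_sketch(struct sk_buff *skb, bool will_modify)
{
	return will_modify ? __qdf_nbuf_copy(skb) : __qdf_nbuf_clone(skb);
}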
1120 
1121 #define __qdf_nbuf_reserve      skb_reserve
1122 
1123 /**
1124  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1125  * @skb: Pointer to network buffer
1126  * @data: data pointer
1127  *
1128  * Return: none
1129  */
1130 static inline void
1131 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1132 {
1133 	skb->data = data;
1134 }
1135 
1136 /**
1137  * __qdf_nbuf_set_len() - set buffer data length
1138  * @skb: Pointer to network buffer
1139  * @len: data length
1140  *
1141  * Return: none
1142  */
1143 static inline void
1144 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1145 {
1146 	skb->len = len;
1147 }
1148 
1149 /**
1150  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1151  * @skb: Pointer to network buffer
1152  * @len: skb data length
1153  *
1154  * Return: none
1155  */
1156 static inline void
1157 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1158 {
1159 	skb_set_tail_pointer(skb, len);
1160 }
1161 
1162 /**
1163  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1164  * @skb: Pointer to network buffer
1165  * @list: list to use
1166  *
1167  * This is a lockless version, driver must acquire locks if it
1168  * needs to synchronize
1169  *
1170  * Return: none
1171  */
1172 static inline void
1173 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1174 {
1175 	__skb_unlink(skb, list);
1176 }
1177 
1178 /**
1179  * __qdf_nbuf_reset() - reset the buffer data and pointer
1180  * @skb: Network buf instance
1181  * @reserve: reserve
1182  * @align: align
1183  *
1184  * Return: none
1185  */
1186 static inline void
1187 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1188 {
1189 	int offset;
1190 
1191 	skb_push(skb, skb_headroom(skb));
1192 	skb_put(skb, skb_tailroom(skb));
1193 	memset(skb->data, 0x0, skb->len);
1194 	skb_trim(skb, 0);
1195 	skb_reserve(skb, NET_SKB_PAD);
1196 	memset(skb->cb, 0x0, sizeof(skb->cb));
1197 
1198 	/*
1199 	 * The default is for netbuf fragments to be interpreted
1200 	 * as wordstreams rather than bytestreams.
1201 	 */
1202 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1203 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1204 
1205 	/*
1206 	 * Align & make sure that the tail & data are adjusted properly
1207 	 */
1208 
1209 	if (align) {
1210 		offset = ((unsigned long)skb->data) % align;
1211 		if (offset)
1212 			skb_reserve(skb, align - offset);
1213 	}
1214 
1215 	skb_reserve(skb, reserve);
1216 }
1217 
1218 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1219 /**
1220  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
1221  *                                         buffer in kernel
1221  *                                       in kernel
1222  *
1223  * Return: true if dev_scratch is supported
1224  *         false if dev_scratch is not supported
1225  */
1226 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1227 {
1228 	return true;
1229 }
1230 
1231 /**
1232  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1233  * @skb: Pointer to network buffer
1234  *
1235  * Return: dev_scratch if dev_scratch supported
1236  *         0 if dev_scratch not supported
1237  */
1238 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1239 {
1240 	return skb->dev_scratch;
1241 }
1242 
1243 /**
1244  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1245  * @skb: Pointer to network buffer
1246  * @value: value to be set in dev_scratch of network buffer
1247  *
1248  * Return: void
1249  */
1250 static inline void
1251 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1252 {
1253 	skb->dev_scratch = value;
1254 }
1255 #else
1256 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1257 {
1258 	return false;
1259 }
1260 
1261 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1262 {
1263 	return 0;
1264 }
1265 
1266 static inline void
1267 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1268 {
1269 }
1270 #endif /* KERNEL_VERSION(4, 14, 0) */
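
/*
 * Usage sketch (illustrative only): stash a driver cookie in dev_scratch
 * when the running kernel supports it; this is a no-op on older kernels.
 */
static inline void
__qdf_nbuf_dev_scratch_usage_sketch(struct sk_buff *skb, unsigned long cookie)
{
	if (__qdf_nbuf_is_dev_scratch_supported())
		__qdf_nbuf_set_dev_scratch(skb, cookie);
}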
1271 
1272 /**
1273  * __qdf_nbuf_head() - return the skb's head pointer
1274  * @skb: Pointer to network buffer
1275  *
1276  * Return: Pointer to head buffer
1277  */
1278 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1279 {
1280 	return skb->head;
1281 }
1282 
1283 /**
1284  * __qdf_nbuf_data() - return the pointer to data header in the skb
1285  * @skb: Pointer to network buffer
1286  *
1287  * Return: Pointer to skb data
1288  */
1289 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1290 {
1291 	return skb->data;
1292 }
1293 
1294 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1295 {
1296 	return (uint8_t *)&skb->data;
1297 }
1298 
1299 /**
1300  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1301  * @skb: Pointer to network buffer
1302  *
1303  * Return: skb protocol
1304  */
1305 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1306 {
1307 	return skb->protocol;
1308 }
1309 
1310 /**
1311  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1312  * @skb: Pointer to network buffer
1313  *
1314  * Return: skb ip_summed
1315  */
1316 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1317 {
1318 	return skb->ip_summed;
1319 }
1320 
1321 /**
1322  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1323  * @skb: Pointer to network buffer
1324  * @ip_summed: ip checksum
1325  *
1326  * Return: none
1327  */
1328 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1329 		 uint8_t ip_summed)
1330 {
1331 	skb->ip_summed = ip_summed;
1332 }
1333 
1334 /**
1335  * __qdf_nbuf_get_priority() - return the priority value of the skb
1336  * @skb: Pointer to network buffer
1337  *
1338  * Return: skb priority
1339  */
1340 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1341 {
1342 	return skb->priority;
1343 }
1344 
1345 /**
1346  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1347  * @skb: Pointer to network buffer
1348  * @p: priority
1349  *
1350  * Return: none
1351  */
1352 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1353 {
1354 	skb->priority = p;
1355 }
1356 
1357 /**
1358  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1359  * @skb: Current skb
1360  * @skb_next: Next skb
1361  *
1362  * Return: void
1363  */
1364 static inline void
1365 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1366 {
1367 	skb->next = skb_next;
1368 }
1369 
1370 /**
1371  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1372  * @skb: Current skb
1373  *
1374  * Return: the next skb pointed to by the current skb
1375  */
1376 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1377 {
1378 	return skb->next;
1379 }
1380 
1381 /**
1382  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1383  * @skb: Current skb
1384  * @skb_next: Next skb
1385  *
1386  * This fn is used to link up extensions to the head skb. Does not handle
1387  * linking to the head
1388  *
1389  * Return: none
1390  */
1391 static inline void
1392 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1393 {
1394 	skb->next = skb_next;
1395 }
1396 
1397 /**
1398  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1399  * @skb: Current skb
1400  *
1401  * Return: the next skb pointed to by the current skb
1402  */
1403 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1404 {
1405 	return skb->next;
1406 }
1407 
1408 /**
1409  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1410  * @skb_head: head_buf nbuf holding head segment (single)
1411  * @ext_list: nbuf list holding linked extensions to the head
1412  * @ext_len: Total length of all buffers in the extension list
1413  *
1414  * This function is used to link up a list of packet extensions (seg1, seg2, ...)
1415  * to the nbuf holding the head segment (seg0)
1416  *
1417  * Return: none
1418  */
1419 static inline void
1420 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1421 			struct sk_buff *ext_list, size_t ext_len)
1422 {
1423 	skb_shinfo(skb_head)->frag_list = ext_list;
1424 	skb_head->data_len += ext_len;
1425 	skb_head->len += ext_len;
1426 }
1427 
1428 /**
1429  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1430  * @head_buf: Network buf holding head segment (single)
1431  *
1432  * This ext_list is populated for jumbo packets, e.g. during monitor mode
1433  * amsdu packet reception; the segments are stitched together via frag_list.
1434  *
1435  * Return: Network buf list holding linked extensions from head buf.
1436  */
1437 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1438 {
1439 	return (skb_shinfo(head_buf)->frag_list);
1440 }
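
/*
 * Usage sketch (illustrative only): link a list of extension segments to
 * a head segment and confirm the frag_list linkage.
 */
static inline void
__qdf_nbuf_ext_list_usage_sketch(struct sk_buff *head,
				 struct sk_buff *ext_list, size_t ext_len)
{
	__qdf_nbuf_append_ext_list(head, ext_list, ext_len);
	/* head->len now covers the head segment plus all extensions */
	qdf_assert(__qdf_nbuf_get_ext_list(head) == ext_list);
}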
1441 
1442 /**
1443  * __qdf_nbuf_get_age() - return the checksum value of the skb
1444  * @skb: Pointer to network buffer
1445  *
1446  * Return: checksum value
1447  */
1448 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1449 {
1450 	return skb->csum;
1451 }
1452 
1453 /**
1454  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1455  * @skb: Pointer to network buffer
1456  * @v: Value
1457  *
1458  * Return: none
1459  */
1460 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1461 {
1462 	skb->csum = v;
1463 }
1464 
1465 /**
1466  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1467  * @skb: Pointer to network buffer
1468  * @adj: Adjustment value
1469  *
1470  * Return: none
1471  */
1472 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1473 {
1474 	skb->csum -= adj;
1475 }
1476 
1477 /**
1478  * __qdf_nbuf_copy_bits() - copy data from the skb to a destination buffer
1479  * @skb: Pointer to network buffer
1480  * @offset: Offset value
1481  * @len: Length
1482  * @to: Destination pointer
1483  *
1484  * Return: 0 on success, negative error on failure
1485  */
1486 static inline int32_t
1487 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1488 {
1489 	return skb_copy_bits(skb, offset, to, len);
1490 }
1491 
1492 /**
1493  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1494  * @skb: Pointer to network buffer
1495  * @len:  Packet length
1496  *
1497  * Return: none
1498  */
1499 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1500 {
1501 	if (skb->len > len) {
1502 		skb_trim(skb, len);
1503 	} else {
1504 		if (skb_tailroom(skb) < len - skb->len) {
1505 			if (unlikely(pskb_expand_head(skb, 0,
1506 				len - skb->len - skb_tailroom(skb),
1507 				GFP_ATOMIC))) {
1508 				QDF_DEBUG_PANIC(
1509 				   "SKB tailroom is less than requested length."
1510 				   " tail-room: %u, len: %u, skb->len: %u",
1511 				   skb_tailroom(skb), len, skb->len);
1512 				dev_kfree_skb_any(skb);
1513 			}
1514 		}
1515 		skb_put(skb, (len - skb->len));
1516 	}
1517 }
1518 
1519 /**
1520  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1521  * @skb: Pointer to network buffer
1522  * @protocol: Protocol type
1523  *
1524  * Return: none
1525  */
1526 static inline void
1527 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1528 {
1529 	skb->protocol = protocol;
1530 }
1531 
1532 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1533 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1534 
1535 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1536 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1537 
1538 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1539 				      uint32_t *lo, uint32_t *hi);
1540 
1541 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1542 	struct qdf_tso_info_t *tso_info);
1543 
1544 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1545 			  struct qdf_tso_seg_elem_t *tso_seg,
1546 			  bool is_last_seg);
1547 
1548 #ifdef FEATURE_TSO
1549 /**
1550  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1551  *                                    payload len
1552  * @skb: buffer
1553  *
1554  * Return: size
1555  */
1556 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1557 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1558 
1559 #else
1560 static inline
1561 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
1562 {
1563 	return 0;
1564 }
1565 
1566 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1567 {
1568 	return 0;
1569 }
1570 
1571 #endif /* FEATURE_TSO */
1572 
1573 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1574 {
1575 	if (skb_is_gso(skb) &&
1576 		(skb_is_gso_v6(skb) ||
1577 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1578 		return true;
1579 	else
1580 		return false;
1581 }
1582 
1583 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1584 
1585 int __qdf_nbuf_get_users(struct sk_buff *skb);
1586 
1587 /**
1588  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1589  *			      and get hw_classify by peeking
1590  *			      into packet
1591  * @nbuf:		Network buffer (skb on Linux)
1592  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1593  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1594  *			needs to be set in case of CE classification support
1595  *			Is set by this macro.
1596  * @hw_classify:	This is a flag which is set to indicate
1597  *			CE classification is enabled.
1598  *			Do not set this bit for VLAN packets
1599  *			OR for mcast / bcast frames.
1600  *
1601  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1602  * whether to enable tx_classify bit in CE.
1603  *
1604  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1605  * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a
1606  * length and the frame is 802.3; otherwise it is Ethernet Type II
1607  * (RFC 894).
1608  * Bit 4 in pkt_subtype is the tx_classify bit
1609  *
1610  * Return:	void
1611  */
1612 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1613 				pkt_subtype, hw_classify)	\
1614 do {								\
1615 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1616 	uint16_t ether_type = ntohs(eh->h_proto);		\
1617 	bool is_mc_bc;						\
1618 								\
1619 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1620 		   is_multicast_ether_addr((uint8_t *)eh);	\
1621 								\
1622 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1623 		hw_classify = 1;				\
1624 		pkt_subtype = 0x01 <<				\
1625 			HTT_TX_CLASSIFY_BIT_S;			\
1626 	}							\
1627 								\
1628 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1629 		pkt_type = htt_pkt_type_ethernet;		\
1630 								\
1631 } while (0)
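
/*
 * Usage sketch (illustrative only; htt_pkt_type_ethernet and the HTT
 * definitions come from headers that callers of this macro are expected
 * to include):
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */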
1632 
1633 /**
1634  * nbuf private buffer routines
1635  */
1636 
1637 /**
1638  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1639  * @skb: Pointer to network buffer
1640  * @addr: Pointer to store header's addr
1641  * @len: network buffer length
1642  *
1643  * Return: none
1644  */
1645 static inline void
1646 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1647 {
1648 	*addr = skb->data;
1649 	*len = skb->len;
1650 }
1651 
1652 /**
1653  * typedef struct __qdf_nbuf_queue_t -  network buffer queue
1654  * @head: Head pointer
1655  * @tail: Tail pointer
1656  * @qlen: Queue length
1657  */
1658 typedef struct __qdf_nbuf_qhead {
1659 	struct sk_buff *head;
1660 	struct sk_buff *tail;
1661 	unsigned int qlen;
1662 } __qdf_nbuf_queue_t;
1663 
1664 /******************Functions *************/
1665 
1666 /**
1667  * __qdf_nbuf_queue_init() - initialize the queue head
1668  * @qhead: Queue head
1669  *
1670  * Return: QDF status
1671  */
1672 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1673 {
1674 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1675 	return QDF_STATUS_SUCCESS;
1676 }
1677 
1678 /**
1679  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1680  * @qhead: Queue head
1681  * @skb: Pointer to network buffer
1682  *
1683  * This is a lockless version, driver must acquire locks if it
1684  * needs to synchronize
1685  *
1686  * Return: none
1687  */
1688 static inline void
1689 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1690 {
1691 	skb->next = NULL;       /*Nullify the next ptr */
1692 
1693 	if (!qhead->head)
1694 		qhead->head = skb;
1695 	else
1696 		qhead->tail->next = skb;
1697 
1698 	qhead->tail = skb;
1699 	qhead->qlen++;
1700 }
1701 
1702 /**
1703  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1704  * @dest: target netbuf queue
1705  * @src:  source netbuf queue
1706  *
1707  * Return: target netbuf queue
1708  */
1709 static inline __qdf_nbuf_queue_t *
1710 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1711 {
1712 	if (!dest)
1713 		return NULL;
1714 	else if (!src || !(src->head))
1715 		return dest;
1716 
1717 	if (!(dest->head))
1718 		dest->head = src->head;
1719 	else
1720 		dest->tail->next = src->head;
1721 
1722 	dest->tail = src->tail;
1723 	dest->qlen += src->qlen;
1724 	return dest;
1725 }
1726 
1727 /**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1729  * @qhead: Queue head
1730  * @skb: Pointer to network buffer
1731  *
 * This is a lockless version; the driver must acquire locks if it needs
 * to synchronize
1734  *
1735  * Return: none
1736  */
1737 static inline void
1738 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1739 {
1740 	if (!qhead->head) {
		/* Empty queue: the tail pointer must also be updated */
1742 		qhead->tail = skb;
1743 	}
1744 	skb->next = qhead->head;
1745 	qhead->head = skb;
1746 	qhead->qlen++;
1747 }
1748 
1749 /**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1751  * @qhead: Queue head
1752  *
1753  * This is a lockless version. Driver should take care of the locks
1754  *
1755  * Return: skb or NULL
1756  */
1757 static inline
1758 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1759 {
1760 	__qdf_nbuf_t tmp = NULL;
1761 
1762 	if (qhead->head) {
1763 		qhead->qlen--;
1764 		tmp = qhead->head;
1765 		if (qhead->head == qhead->tail) {
1766 			qhead->head = NULL;
1767 			qhead->tail = NULL;
1768 		} else {
1769 			qhead->head = tmp->next;
1770 		}
1771 		tmp->next = NULL;
1772 	}
1773 	return tmp;
1774 }
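
/*
 * Example (editor's sketch): build and drain a driver-owned lockless
 * queue with the helpers above; any synchronization needed is assumed
 * to be provided by the caller.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb_a);
 *	__qdf_nbuf_queue_add(&q, skb_b);
 *
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		dev_kfree_skb_any(skb);
 */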
1775 
1776 /**
1777  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1778  * @qhead: head of queue
1779  *
1780  * Return: NULL if the queue is empty
1781  */
1782 static inline struct sk_buff *
1783 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1784 {
1785 	return qhead->head;
1786 }
1787 
1788 /**
1789  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1790  * @qhead: head of queue
1791  *
1792  * Return: NULL if the queue is empty
1793  */
1794 static inline struct sk_buff *
1795 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1796 {
1797 	return qhead->tail;
1798 }
1799 
1800 /**
1801  * __qdf_nbuf_queue_len() - return the queue length
1802  * @qhead: Queue head
1803  *
1804  * Return: Queue length
1805  */
1806 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1807 {
1808 	return qhead->qlen;
1809 }
1810 
1811 /**
1812  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1813  * @skb: Pointer to network buffer
1814  *
1815  * This API returns the next skb from packet chain, remember the skb is
1816  * still in the queue
1817  *
1818  * Return: NULL if no packets are there
1819  */
1820 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1821 {
1822 	return skb->next;
1823 }
1824 
1825 /**
1826  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1827  * @qhead: Queue head
1828  *
1829  * Return: true if length is 0 else false
1830  */
1831 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1832 {
1833 	return qhead->qlen == 0;
1834 }
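
/*
 * Example (editor's sketch): walk a queue in place without dequeuing;
 * the skbs remain on the queue while they are inspected.
 *
 *	struct sk_buff *skb;
 *
 *	if (!__qdf_nbuf_is_queue_empty(&q))
 *		for (skb = __qdf_nbuf_queue_first(&q); skb;
 *		     skb = __qdf_nbuf_queue_next(skb))
 *			inspect(skb);
 */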
1835 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type for the definition.
 */
1847 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: sk buff
 * @flag: flag value
 *
 * No-op in the Linux implementation.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1852 
1853 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom of the data
 *        region while keeping the skb shell intact. In case of failure
 *        the skb is released.
1857  * @skb: sk buff
1858  * @headroom: size of headroom
1859  *
1860  * Return: skb or NULL
1861  */
1862 static inline struct sk_buff *
1863 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1864 {
1865 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1866 		dev_kfree_skb_any(skb);
1867 		skb = NULL;
1868 	}
1869 	return skb;
1870 }
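
/*
 * Example (editor's sketch): make room for an encapsulation header of
 * hdr_len bytes before pushing it; on failure the skb has already been
 * freed, so only the returned pointer may be used afterwards.
 *
 *	if (skb_headroom(skb) < hdr_len) {
 *		skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
 *		if (!skb)
 *			return QDF_STATUS_E_NOMEM;
 *	}
 *	skb_push(skb, hdr_len);
 */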
1871 
1872 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom of the data
 *        region while keeping the skb shell intact. In case of failure
 *        the skb is released.
1876  * @skb: sk buff
1877  * @tailroom: size of tailroom
1878  *
1879  * Return: skb or NULL
1880  */
1881 static inline struct sk_buff *
1882 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1883 {
1884 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1885 		return skb;
	/* unlikely path */
1889 	dev_kfree_skb_any(skb);
1890 	return NULL;
1891 }
1892 
/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it, i.e. copies
 * the paged data into the linear data area so the contents can be
 * accessed contiguously.
 *
 * Return: 0 on success, -ENOMEM on failure
 */
1905 static inline int
1906 __qdf_nbuf_linearize(struct sk_buff *skb)
1907 {
1908 	return skb_linearize(skb);
1909 }
1910 
1911 /**
1912  * __qdf_nbuf_unshare() - skb unshare
1913  * @skb: sk buff
1914  *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a
 * clone, the original buffer is returned.
1920  *
1921  * Return: skb or NULL
1922  */
1923 static inline struct sk_buff *
1924 __qdf_nbuf_unshare(struct sk_buff *skb)
1925 {
1926 	struct sk_buff *skb_new;
1927 
1928 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1929 
1930 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1931 	if (skb_new)
1932 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1933 
1934 	return skb_new;
1935 }
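
/*
 * Example (editor's sketch): the unshare-before-modify pattern. After
 * the call only the returned pointer may be used; the original skb may
 * have been freed and replaced by a private copy.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	now it is safe to modify skb->data
 */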
1936 
1937 /**
1938  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1940  *
1941  * Return: true/false
1942  */
1943 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1944 {
1945 	return skb_cloned(skb);
1946 }
1947 
1948 /**
1949  * __qdf_nbuf_pool_init() - init pool
1950  * @net: net handle
1951  *
1952  * Return: QDF status
1953  */
1954 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1955 {
1956 	return QDF_STATUS_SUCCESS;
1957 }
1958 
1959 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
1961  */
1962 #define __qdf_nbuf_pool_delete(osdev)
1963 
1964 /**
1965  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1966  *        release the skb.
1967  * @skb: sk buff
1968  * @headroom: size of headroom
1969  * @tailroom: size of tailroom
1970  *
1971  * Return: skb or NULL
1972  */
1973 static inline struct sk_buff *
1974 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1975 {
1976 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1977 		return skb;
1978 
1979 	dev_kfree_skb_any(skb);
1980 	return NULL;
1981 }
1982 
1983 /**
1984  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1985  * @buf: Network buf instance
1986  * @headroom: Additional headroom to be added
1987  * @tailroom: Additional tailroom to be added
1988  *
1989  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1990  *	or NULL if there is no memory
1991  */
1992 static inline struct sk_buff *
1993 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1994 {
1995 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1996 }
1997 
1998 /**
 * __qdf_nbuf_has_fraglist() - check whether buf has a frag_list
2000  * @buf: Network buf instance
2001  *
2002  * Return: True, if buf has frag_list else return False
2003  */
2004 static inline bool
2005 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2006 {
2007 	return skb_has_frag_list(buf);
2008 }
2009 
2010 /**
2011  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2012  * @buf: Network buf instance
2013  *
2014  * Return: Network buf instance
2015  */
2016 static inline struct sk_buff *
2017 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2018 {
2019 	struct sk_buff *list;
2020 
2021 	if (!__qdf_nbuf_has_fraglist(buf))
2022 		return NULL;
2023 
2024 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2025 		;
2026 
2027 	return list;
2028 }
2029 
2030 /**
2031  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2032  * @buf: Network buf instance
2033  *
2034  * Return: void
2035  */
2036 static inline void
2037 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2038 {
2039 	struct sk_buff *list;
2040 
2041 	skb_walk_frags(buf, list)
2042 		skb_get(list);
2043 }
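
/*
 * Example (editor's sketch): take a reference on every fraglist
 * element before sharing the chain, then locate its tail.
 *
 *	struct sk_buff *tail = NULL;
 *
 *	if (__qdf_nbuf_has_fraglist(head)) {
 *		__qdf_nbuf_get_ref_fraglist(head);
 *		tail = __qdf_nbuf_get_last_frag_list_nbuf(head);
 *	}
 */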
2044 
2045 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info (unimplemented; asserts)
 * @skb: sk buff
 * @hdr_off: checksum offset out-pointer
 * @where: checksum location out-pointer
 *
2048  * Return: true/false
2049  */
2050 static inline bool
2051 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
2052 			 uint8_t **where)
2053 {
2054 	qdf_assert(0);
2055 	return false;
2056 }
2057 
2058 /**
2059  * __qdf_nbuf_reset_ctxt() - mem zero control block
2060  * @nbuf: buffer
2061  *
2062  * Return: none
2063  */
2064 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2065 {
2066 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2067 }
2068 
2069 /**
2070  * __qdf_nbuf_network_header() - get network header
2071  * @buf: buffer
2072  *
2073  * Return: network header pointer
2074  */
2075 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2076 {
2077 	return skb_network_header(buf);
2078 }
2079 
2080 /**
2081  * __qdf_nbuf_transport_header() - get transport header
2082  * @buf: buffer
2083  *
2084  * Return: transport header pointer
2085  */
2086 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2087 {
2088 	return skb_transport_header(buf);
2089 }
2090 
2091 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS),
 * passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
2098  */
2099 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2100 {
2101 	return skb_shinfo(skb)->gso_size;
2102 }
2103 
2104 /**
2105  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2106  * @nbuf: sk buff
2107  *
2108  * Return: none
2109  */
2110 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2111 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2114  * @nbuf: sk buff
2115  *
2116  * Return: void ptr
2117  */
2118 static inline void *
2119 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2120 {
2121 	return (void *)nbuf->cb;
2122 }
2123 
2124 /**
2125  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2126  * @skb: sk buff
2127  *
2128  * Return: head size
2129  */
2130 static inline size_t
2131 __qdf_nbuf_headlen(struct sk_buff *skb)
2132 {
2133 	return skb_headlen(skb);
2134 }
2135 
2136 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4 or not
 * @skb: sk buff
2139  *
2140  * Return: true/false
2141  */
2142 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2143 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
2145 }
2146 
2147 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6 or not
 * @skb: sk buff
2150  *
2151  * Return: true/false
2152  */
2153 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2154 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
2156 }
2157 
2158 /**
2159  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2160  * @skb: sk buff
2161  *
2162  * Return: size of l2+l3+l4 header length
2163  */
2164 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2165 {
2166 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2167 }
2168 
2169 /**
2170  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2171  * @skb: sk buff
2172  *
2173  * Return: size of TCP header length
2174  */
2175 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2176 {
2177 	return tcp_hdrlen(skb);
2178 }
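
/*
 * Example (editor's sketch): per-segment accounting for a TSO skb,
 * assuming the stack reported a non-zero MSS.
 *
 *	size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
 *	size_t mss = __qdf_nbuf_tcp_tso_size(skb);
 *	size_t payload = skb->len - hdr_len;
 *	uint32_t nsegs = (payload + mss - 1) / mss;
 */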
2179 
2180 /**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
2185  */
2186 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2187 {
	return skb_is_nonlinear(skb);
2192 }
2193 
2194 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2197  *
2198  * Return: TCP sequence number
2199  */
2200 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2201 {
2202 	return ntohl(tcp_hdr(skb)->seq);
2203 }
2204 
2205 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2208  *
2209  * Return: data pointer to typecast into your priv structure
2210  */
2211 static inline uint8_t *
2212 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2213 {
2214 	return &skb->cb[8];
2215 }
2216 
2217 /**
2218  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2219  * @buf: Pointer to nbuf
2220  *
2221  * Return: None
2222  */
2223 static inline void
2224 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2225 {
2226 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2227 }
2228 
2229 /**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
2234  *
2235  * Return: void
2236  */
2237 static inline void
2238 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2239 {
2240 	skb_record_rx_queue(skb, queue_id);
2241 }
2242 
2243 /**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by the linux kernel
 * @skb: sk buff
2247  *
2248  * Return: Queue mapping
2249  */
2250 static inline uint16_t
2251 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2252 {
2253 	return skb->queue_mapping;
2254 }
2255 
2256 /**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
2262  */
2263 static inline void
2264 __qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
2265 {
2266 	skb_set_queue_mapping(skb, val);
2267 }
2268 
2269 /**
2270  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
2273  *
2274  * Return: void
2275  */
2276 static inline void
2277 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2278 {
2279 	__net_timestamp(skb);
2280 }
2281 
2282 /**
2283  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
2286  *
2287  * Return: timestamp stored in skb in ms
2288  */
2289 static inline uint64_t
2290 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2291 {
2292 	return ktime_to_ms(skb_get_ktime(skb));
2293 }
2294 
2295 /**
2296  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
2299  *
2300  * Return: time difference in ms
2301  */
2302 static inline uint64_t
2303 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2304 {
2305 	return ktime_to_ms(net_timedelta(skb->tstamp));
2306 }
2307 
2308 /**
2309  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 * @skb: sk buff
2312  *
2313  * Return: time difference in micro seconds
2314  */
2315 static inline uint64_t
2316 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2317 {
2318 	return ktime_to_us(net_timedelta(skb->tstamp));
2319 }
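
/*
 * Example (editor's sketch): measure how long a frame waited on a
 * software queue by stamping it on enqueue and reading the delta on
 * dequeue.
 *
 *	uint64_t delay_us;
 *
 *	__qdf_nbuf_set_timestamp(skb);                  at enqueue
 *	...
 *	delay_us = __qdf_nbuf_get_timedelta_us(skb);    at dequeue
 */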
2320 
2321 /**
2322  * __qdf_nbuf_orphan() - orphan a nbuf
2323  * @skb: sk buff
2324  *
2325  * If a buffer currently has an owner then we call the
2326  * owner's destructor function
2327  *
2328  * Return: void
2329  */
2330 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2331 {
	skb_orphan(skb);
2333 }
2334 
2335 /**
2336  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2337  * head pointer to end pointer
2338  * @nbuf: qdf_nbuf_t
2339  *
2340  * Return: size of network buffer from head pointer to end
2341  * pointer
2342  */
2343 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2344 {
2345 	return skb_end_offset(nbuf);
2346 }
2347 
2348 /**
2349  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2350  * including the header and variable data area
2351  * @skb: sk buff
2352  *
2353  * Return: size of network buffer
2354  */
2355 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2356 {
2357 	return skb->truesize;
2358 }
2359 
2360 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2361 /**
2362  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2363  * from the total skb mem and DP tx/rx skb mem
2364  * @nbytes: number of bytes
2365  * @dir: direction
 * @is_mapped: true for a map operation (add), false for an unmap (subtract)
2367  *
2368  * Return: none
2369  */
2370 static inline void __qdf_record_nbuf_nbytes(
2371 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2372 {
2373 	if (is_mapped) {
2374 		if (dir == QDF_DMA_TO_DEVICE) {
2375 			qdf_mem_dp_tx_skb_cnt_inc();
2376 			qdf_mem_dp_tx_skb_inc(nbytes);
2377 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2378 			qdf_mem_dp_rx_skb_cnt_inc();
2379 			qdf_mem_dp_rx_skb_inc(nbytes);
2380 		}
2381 		qdf_mem_skb_total_inc(nbytes);
2382 	} else {
2383 		if (dir == QDF_DMA_TO_DEVICE) {
2384 			qdf_mem_dp_tx_skb_cnt_dec();
2385 			qdf_mem_dp_tx_skb_dec(nbytes);
2386 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2387 			qdf_mem_dp_rx_skb_cnt_dec();
2388 			qdf_mem_dp_rx_skb_dec(nbytes);
2389 		}
2390 		qdf_mem_skb_total_dec(nbytes);
2391 	}
2392 }
2393 
2394 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2395 static inline void __qdf_record_nbuf_nbytes(
2396 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2397 {
2398 }
2399 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2400 
2401 /**
2402  * __qdf_nbuf_map_nbytes_single() - map nbytes
2403  * @osdev: os device
2404  * @buf: buffer
2405  * @dir: direction
2406  * @nbytes: number of bytes
2407  *
2408  * Return: QDF_STATUS
2409  */
2410 #ifdef A_SIMOS_DEVHOST
2411 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2412 		qdf_device_t osdev, struct sk_buff *buf,
2413 		qdf_dma_dir_t dir, int nbytes)
2414 {
2415 	qdf_dma_addr_t paddr;
2416 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)(uintptr_t)buf->data;
2418 	return QDF_STATUS_SUCCESS;
2419 }
2420 #else
2421 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2422 		qdf_device_t osdev, struct sk_buff *buf,
2423 		qdf_dma_dir_t dir, int nbytes)
2424 {
2425 	qdf_dma_addr_t paddr;
2426 	QDF_STATUS ret;
2427 
2428 	/* assume that the OS only provides a single fragment */
2429 	QDF_NBUF_CB_PADDR(buf) = paddr =
2430 		dma_map_single(osdev->dev, buf->data,
2431 			       nbytes, __qdf_dma_dir_to_os(dir));
2432 	ret =  dma_mapping_error(osdev->dev, paddr) ?
2433 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2434 	if (QDF_IS_STATUS_SUCCESS(ret))
2435 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2436 					 dir, true);
2437 	return ret;
2438 }
2439 #endif
2440 /**
2441  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2442  * @osdev: os device
2443  * @buf: buffer
2444  * @dir: direction
2445  * @nbytes: number of bytes
2446  *
2447  * Return: none
2448  */
2449 #if defined(A_SIMOS_DEVHOST)
2450 static inline void
2451 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2452 			       qdf_dma_dir_t dir, int nbytes)
2453 {
2454 }
2455 
2456 #else
2457 static inline void
2458 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2459 			       qdf_dma_dir_t dir, int nbytes)
2460 {
2461 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2462 
2463 	if (qdf_likely(paddr)) {
2464 		__qdf_record_nbuf_nbytes(
2465 			__qdf_nbuf_get_end_offset(buf), dir, false);
2466 		dma_unmap_single(osdev->dev, paddr, nbytes,
2467 				 __qdf_dma_dir_to_os(dir));
2469 	}
2470 }
2471 #endif
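
/*
 * Example (editor's sketch): map a buffer for device DMA and unmap it
 * once the hardware is done with it; nbytes must match across the two
 * calls.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	hand QDF_NBUF_CB_PADDR(skb) to the hardware, then:
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */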
2472 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list
 *
 * Return: skb or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of skbs on the list
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the list tail
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list
 * @skb_queue_head: skb list to be initialized
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_purge() - purge the skb list, freeing its skbs
 * @skb_queue_head: skb list to be purged
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
2503 
2504 /**
2505  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
2507  *
2508  * Return: void
2509  */
2510 static inline
2511 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2512 {
2513 	spin_lock_bh(&skb_queue_head->lock);
2514 }
2515 
2516 /**
2517  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
2519  *
2520  * Return: void
2521  */
2522 static inline
2523 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2524 {
2525 	spin_unlock_bh(&skb_queue_head->lock);
2526 }
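
/*
 * Example (editor's sketch): the enqueue/dequeue helpers above take the
 * list lock internally, so they must not be called with it held. The
 * explicit lock helpers are for compound operations, e.g. a consistent
 * peek plus depth check:
 *
 *	uint32_t qlen;
 *	struct sk_buff *first;
 *
 *	__qdf_nbuf_queue_head_lock(&q);
 *	qlen = __qdf_nbuf_queue_head_qlen(&q);
 *	first = skb_peek(&q);
 *	__qdf_nbuf_queue_head_unlock(&q);
 */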
2527 
2528 /**
2529  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2530  * @nbuf: qdf_nbuf_t
2531  * @idx: Index for which frag size is requested
2532  *
2533  * Return: Frag size
2534  */
2535 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2536 							   uint8_t idx)
2537 {
2538 	unsigned int size = 0;
2539 
2540 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2541 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2542 	return size;
2543 }
2544 
2545 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
2551  */
2552 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2553 						    uint8_t idx)
2554 {
2555 	__qdf_frag_t frag_addr = NULL;
2556 
2557 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2558 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2559 	return frag_addr;
2560 }
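
/*
 * Example (editor's sketch): walk the page fragments of an nbuf by
 * index, reading each fragment's address and size.
 *
 *	uint8_t i;
 *	uint8_t nr = __qdf_nbuf_get_nr_frags(nbuf);
 *
 *	for (i = 0; i < nr; i++) {
 *		__qdf_frag_t va = __qdf_nbuf_get_frag_addr(nbuf, i);
 *		unsigned int len = __qdf_nbuf_get_frag_size_by_idx(nbuf, i);
 *		process len bytes at va
 *	}
 */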
2561 
2562 /**
2563  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2564  * @nbuf: qdf_nbuf_t
2565  * @idx: Frag index
2566  * @size: Size by which frag_size needs to be increased/decreased
2567  *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2570 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2571 						 int size,
2572 						 unsigned int truesize)
2573 {
2574 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2575 }
2576 
2577 /**
2578  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2579  *          and adjust length by size.
2580  * @nbuf: qdf_nbuf_t
2581  * @idx: Frag index
2582  * @offset: Frag page offset should be moved by offset.
2583  *      +Ve - Move offset forward.
2584  *      -Ve - Move offset backward.
2585  *
2586  * Return: QDF_STATUS
2587  */
2588 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2589 					    int offset);
2590 
2591 /**
2592  * __qdf_nbuf_remove_frag() - Remove frag from nbuf
2593  * @nbuf: nbuf pointer
2594  * @idx: frag idx need to be removed
2595  * @truesize: truesize of frag
2596  *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

2600 /**
2601  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer to be added to the nbuf frags
2603  * @nbuf: qdf_nbuf_t where frag will be added
2604  * @offset: Offset in frag to be added to nbuf_frags
2605  * @frag_len: Frag length
2606  * @truesize: truesize
 * @take_frag_ref: Whether to take a reference on the frag or not
 *      This bool must be set as per the below condition:
 *      1. False: if this frag is being added to an nbuf
 *              for the first time after allocation.
 *      2. True: if the frag is already an attached part of some
 *              nbuf.
 *
 * A reference is taken on the frag based on the take_frag_ref flag.
2615  */
2616 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2617 			    int offset, int frag_len,
2618 			    unsigned int truesize, bool take_frag_ref);
2619 
2620 /**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: frag buffer
 *
2623  * Return: void
2624  */
2625 void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2626 
2627 /**
2628  * __qdf_nbuf_set_mark() - Set nbuf mark
2629  * @buf: Pointer to nbuf
2630  * @mark: Value to set mark
2631  *
2632  * Return: None
2633  */
2634 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2635 {
2636 	buf->mark = mark;
2637 }
2638 
2639 /**
2640  * __qdf_nbuf_get_mark() - Get nbuf mark
2641  * @buf: Pointer to nbuf
2642  *
2643  * Return: Value of mark
2644  */
2645 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2646 {
2647 	return buf->mark;
2648 }
2649 
2650 /**
2651  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2652  * the data pointer to the end pointer
2653  * @nbuf: qdf_nbuf_t
2654  *
2655  * Return: size of skb from data pointer to end pointer
2656  */
2657 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2658 {
2659 	return (skb_end_pointer(nbuf) - nbuf->data);
2660 }
2661 
2662 /**
2663  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2664  * @skb: Pointer to network buffer
2665  *
2666  * Return: Return the number of gso segments
2667  */
2668 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2669 {
2670 	return skb_shinfo(skb)->gso_segs;
2671 }
2672 
2673 /**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
2678  */
2679 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2680 {
2681 	return skb_shinfo(skb)->gso_size;
2682 }
2683 
2684 /**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso size to set
 *
 * Return: none
2689  */
2690 static inline void
2691 __qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
2692 {
2693 	skb_shinfo(skb)->gso_size = val;
2694 }
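
/*
 * Example (editor's sketch): recompute gso_segs for a TSO skb after a
 * trim, from the remaining payload and the MSS; assumes a non-zero MSS.
 *
 *	unsigned int mss = __qdf_nbuf_get_gso_size(skb);
 *	size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
 *
 *	if (mss)
 *		skb_shinfo(skb)->gso_segs =
 *			DIV_ROUND_UP(skb->len - hdr_len, mss);
 */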
2695 
2696 /**
2697  * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
2699  *
2700  * This function is called to free the skb on failure cases
2701  *
2702  * Return: None
2703  */
2704 static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
2705 {
2706 	kfree_skb(skb);
2707 }
2708 
2709 /**
2710  * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
2712  *
2713  * This function is called to free the skb on failure cases
2714  *
2715  * Return: None
2716  */
2717 static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
2718 {
2719 	dev_kfree_skb(skb);
2720 }
2721 
2722 /**
2723  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
2725  *
2726  * Return: TRUE if skb pkt type is mcast
2727  *         FALSE if not
2728  */
2729 static inline
2730 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
2731 {
2732 	return skb->pkt_type == PACKET_MULTICAST;
2733 }
2734 
2735 /**
2736  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is bcast
2740  *         FALSE if not
2741  */
2742 static inline
2743 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
2744 {
2745 	return skb->pkt_type == PACKET_BROADCAST;
2746 }
2747 
2748 /**
 * __qdf_nbuf_set_dev() - set the dev of a network buffer
 * @skb: Pointer to network buffer
 * @dev: net_device to be set in the network buffer
2752  *
2753  * Return: void
2754  */
2755 static inline
2756 void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
2757 {
2758 	skb->dev = dev;
2759 }
2760 
2761 /**
 * __qdf_nbuf_get_dev_mtu() - get the MTU of the dev in the network buffer
 * @skb: Pointer to network buffer
 *
 * Return: MTU of the device attached to the network buffer
2766  */
2767 static inline
2768 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
2769 {
2770 	return skb->dev->mtu;
2771 }
2772 
2773 /**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol via eth_type_trans()
 * @skb: Pointer to network buffer
2776  *
2777  * Return: None
2778  */
2779 static inline
2780 void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
2781 {
2782 	skb->protocol = eth_type_trans(skb, skb->dev);
2783 }
2784 
/**
2786  * __qdf_nbuf_net_timedelta() - get time delta
2787  * @t: time as __qdf_ktime_t object
2788  *
2789  * Return: time delta as ktime_t object
2790  */
2791 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
2792 {
2793 	return net_timedelta(t);
2794 }
2795 
2796 #ifdef CONFIG_NBUF_AP_PLATFORM
2797 #include <i_qdf_nbuf_w.h>
2798 #else
2799 #include <i_qdf_nbuf_m.h>
2800 #endif
#endif /* _I_QDF_NBUF_H */
2802