/* xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision 2888b71da71bce103343119fa1b31f4a0cee07c8) */
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf.h
22  * This file provides OS dependent nbuf API's.
23  */
24 
25 #ifndef _I_QDF_NBUF_H
26 #define _I_QDF_NBUF_H
27 
28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/version.h>
33 #include <asm/cacheflush.h>
34 #include <qdf_types.h>
35 #include <qdf_net_types.h>
36 #include <qdf_status.h>
37 #include <qdf_util.h>
38 #include <qdf_mem.h>
39 #include <linux/tcp.h>
40 #include <qdf_util.h>
41 #include <qdf_nbuf_frag.h>
42 #include "qdf_time.h"
43 
44 /*
45  * Use socket buffer as the underlying implementation as skbuf .
46  * Linux use sk_buff to represent both packet and data,
47  * so we use sk_buffer to represent both skbuf .
48  */
49 typedef struct sk_buff *__qdf_nbuf_t;
50 
51 /**
52  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
53  *
54  * This is used for skb queue management via linux skb buff head APIs
55  */
56 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
57 
58 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
59 
60 #define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
61 
62 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
63  * max tx fragments added by the driver
64  * The driver will always add one tx fragment (the tx descriptor)
65  */
66 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
67 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
68 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
69 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
70 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
71 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
72 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
73 #define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
74 #define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8
75 
76 #define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)
77 
78 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
79 #define IEEE80211_RADIOTAP_HE 23
80 #define IEEE80211_RADIOTAP_HE_MU 24
81 #endif
82 
83 #define IEEE80211_RADIOTAP_HE_MU_OTHER 25
84 
85 #define IEEE80211_RADIOTAP_EXT1_USIG	1
86 #define IEEE80211_RADIOTAP_EXT1_EHT	2
87 
88 /* mark the first packet after wow wakeup */
89 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
90 
91 /* TCP Related MASK */
92 #define QDF_NBUF_PKT_TCPOP_FIN			0x01
93 #define QDF_NBUF_PKT_TCPOP_FIN_ACK		0x11
94 #define QDF_NBUF_PKT_TCPOP_RST			0x04
95 
96 /*
97  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
98  */
99 typedef union {
100 	uint64_t       u64;
101 	qdf_dma_addr_t dma_addr;
102 } qdf_paddr_t;
103 
104 /**
105  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
106  *                    - data passed between layers of the driver.
107  *
108  * Notes:
109  *   1. Hard limited to 48 bytes. Please count your bytes
110  *   2. The size of this structure has to be easily calculatable and
111  *      consistently so: do not use any conditional compile flags
112  *   3. Split into a common part followed by a tx/rx overlay
113  *   4. There is only one extra frag, which represents the HTC/HTT header
114  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
115  *      for the priv_cb_w since it must be at same offset for both
116  *      TX and RX union
117  *   6. "ipa.owned" bit must be first member in both TX and RX unions
118  *      for the priv_cb_m since it must be at same offset for both
119  *      TX and RX union.
120  *
121  * @paddr   : physical addressed retrieved by dma_map of nbuf->data
122  *
123  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
124  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
125  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
126  * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
127  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
128  * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
129  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
130  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
131  *
132  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
133  * @rx.dev.priv_cb_m.flush_ind: flush indication
134  * @rx.dev.priv_cb_m.packet_buf_pool:  packet buff bool
135  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
136  * @rx.dev.priv_cb_m.exc_frm: exception frame
137  * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *					     sw exception bit from ring desc
140  * @rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
141  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
142  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
143  * @rx.dev.priv_cb_m.lro_ctx: LRO context
144  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
145  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
146  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
147  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
148  *
149  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
150  * @rx.tcp_proto: L4 protocol is TCP
151  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
152  * @rx.ipv6_proto: L3 protocol is IPV6
153  * @rx.ip_offset: offset to IP header
154  * @rx.tcp_offset: offset to TCP header
155  * @rx_ctx_id: Rx context id
156  * @num_elements_in_list: number of elements in the nbuf list
157  *
158  * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
160  *
161  * @rx.flow_id: 32bit flow id
162  *
163  * @rx.flag_chfrag_start: first MSDU in an AMSDU
164  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
165  * @rx.flag_chfrag_end: last MSDU in an AMSDU
166  * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
168  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
169  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
170  * @rx.flag_is_frag: flag to indicate skb has frag list
171  * @rx.rsrvd: reserved
172  *
173  * @rx.trace: combined structure for DP and protocol trace
174  * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
175  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
176  * @rx.trace.dp_trace: flag (Datapath trace)
177  * @rx.trace.packet_track: RX_DATA packet
178  * @rx.trace.rsrvd: enable packet logging
179  *
180  * @rx.vdev_id: vdev_id for RX pkt
181  * @rx.is_raw_frame: RAW frame
182  * @rx.fcs_err: FCS error
183  * @rx.tid_val: tid value
184  * @rx.reserved: reserved
185  * @rx.ftype: mcast2ucast, TSO, SG, MESH
186  *
187  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
188  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
189  *
190  * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
191  *                 + (1) CE classification enablement bit
192  *                 + (2) packet type (802.3 or Ethernet type II)
193  *                 + (3) packet offset (usually length of HTC/HTT descr)
194  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
195  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
196  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
197  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
198  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
199  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
200  * @tx.dev.priv_cb_m.reserved: reserved
201  *
202  * @tx.ftype: mcast2ucast, TSO, SG, MESH
203  * @tx.vdev_id: vdev (for protocol trace)
204  * @tx.len: length of efrag pointed by the above pointers
205  *
206  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
207  * @tx.flags.bits.num: number of extra frags ( 0 or 1)
208  * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
209  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
210  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
211  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
212  * @tx.flags.bits.flag_ext_header: extended flags
213  * @tx.flags.bits.is_critical: flag indicating a critical frame
214  * @tx.trace: combined structure for DP and protocol trace
215  * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
216  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
217  * @tx.trace.is_packet_priv:
218  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
219  * @tx.trace.to_fw: Flag to indicate send this packet to FW
220  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
221  *                          + (MGMT_ACTION)] - 4 bits
222  * @tx.trace.dp_trace: flag (Datapath trace)
223  * @tx.trace.is_bcast: flag (Broadcast packet)
224  * @tx.trace.is_mcast: flag (Multicast packet)
225  * @tx.trace.packet_type: flag (Packet type)
226  * @tx.trace.htt2_frm: flag (high-latency path only)
227  * @tx.trace.print: enable packet logging
228  *
229  * @tx.vaddr: virtual address of ~
230  * @tx.paddr: physical/DMA address of ~
231  */
232 struct qdf_nbuf_cb {
233 	/* common */
234 	qdf_paddr_t paddr; /* of skb->data */
235 	/* valid only in one direction */
236 	union {
237 		/* Note: MAX: 40 bytes */
238 		struct {
239 			union {
240 				struct {
241 					void *ext_cb_ptr;
242 					void *fctx;
243 					uint16_t msdu_len : 14,
244 						 flag_intra_bss : 1,
245 						 ipa_smmu_map : 1;
246 					uint16_t peer_id;
247 					uint16_t protocol_tag;
248 					uint16_t flow_tag;
249 				} priv_cb_w;
250 				struct {
251 					/* ipa_owned bit is common between rx
252 					 * control block and tx control block.
253 					 * Do not change location of this bit.
254 					 */
255 					uint32_t ipa_owned:1,
256 						 peer_cached_buf_frm:1,
257 						 flush_ind:1,
258 						 packet_buf_pool:1,
259 						 l3_hdr_pad:3,
260 						 /* exception frame flag */
261 						 exc_frm:1,
262 						 ipa_smmu_map:1,
263 						 reo_dest_ind_or_sw_excpt:5,
264 						 lmac_id:2,
265 						 reserved1:16;
266 					uint32_t tcp_seq_num;
267 					uint32_t tcp_ack_num;
268 					union {
269 						struct {
270 							uint16_t msdu_len;
271 							uint16_t peer_id;
272 						} wifi3;
273 						struct {
274 							uint32_t map_index;
275 						} wifi2;
276 					} dp;
277 					unsigned char *lro_ctx;
278 				} priv_cb_m;
279 			} dev;
280 			uint32_t lro_eligible:1,
281 				tcp_proto:1,
282 				tcp_pure_ack:1,
283 				ipv6_proto:1,
284 				ip_offset:7,
285 				tcp_offset:7,
286 				rx_ctx_id:4,
287 				fcs_err:1,
288 				is_raw_frame:1,
289 				num_elements_in_list:8;
290 			uint32_t tcp_udp_chksum:16,
291 				 tcp_win:16;
292 			uint32_t flow_id;
293 			uint8_t flag_chfrag_start:1,
294 				flag_chfrag_cont:1,
295 				flag_chfrag_end:1,
296 				flag_retry:1,
297 				flag_da_mcbc:1,
298 				flag_da_valid:1,
299 				flag_sa_valid:1,
300 				flag_is_frag:1;
301 			union {
302 				uint8_t packet_state;
303 				uint8_t dp_trace:1,
304 					packet_track:3,
305 					rsrvd:4;
306 			} trace;
307 			uint16_t vdev_id:8,
308 				 tid_val:4,
309 				 ftype:4;
310 		} rx;
311 
312 		/* Note: MAX: 40 bytes */
313 		struct {
314 			union {
315 				struct {
316 					void *ext_cb_ptr;
317 					void *fctx;
318 				} priv_cb_w;
319 				struct {
320 					/* ipa_owned bit is common between rx
321 					 * control block and tx control block.
322 					 * Do not change location of this bit.
323 					 */
324 					struct {
325 						uint32_t owned:1,
326 							priv:31;
327 					} ipa;
328 					uint32_t data_attr;
329 					uint16_t desc_id;
330 					uint16_t mgmt_desc_id;
331 					struct {
332 						uint8_t bi_map:1,
333 							reserved:7;
334 					} dma_option;
335 					uint8_t flag_notify_comp:1,
336 						rsvd:7;
337 					uint8_t reserved[2];
338 				} priv_cb_m;
339 			} dev;
340 			uint8_t ftype;
341 			uint8_t vdev_id;
342 			uint16_t len;
343 			union {
344 				struct {
345 					uint8_t flag_efrag:1,
346 						flag_nbuf:1,
347 						num:1,
348 						flag_chfrag_start:1,
349 						flag_chfrag_cont:1,
350 						flag_chfrag_end:1,
351 						flag_ext_header:1,
352 						is_critical:1;
353 				} bits;
354 				uint8_t u8;
355 			} flags;
356 			struct {
357 				uint8_t packet_state:7,
358 					is_packet_priv:1;
359 				uint8_t packet_track:3,
360 					to_fw:1,
361 					/* used only for hl */
362 					htt2_frm:1,
363 					proto_type:3;
364 				uint8_t dp_trace:1,
365 					is_bcast:1,
366 					is_mcast:1,
367 					packet_type:4,
368 					print:1;
369 			} trace;
370 			unsigned char *vaddr;
371 			qdf_paddr_t paddr;
372 		} tx;
373 	} u;
374 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
375 
376 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
377 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
378 			(sizeof(struct qdf_nbuf_cb)) <=
379 			sizeof_field(struct sk_buff, cb));
380 #else
381 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
382 			(sizeof(struct qdf_nbuf_cb)) <=
383 			FIELD_SIZEOF(struct sk_buff, cb));
384 #endif
385 
386 /**
387  *  access macros to qdf_nbuf_cb
388  *  Note: These macros can be used as L-values as well as R-values.
389  *        When used as R-values, they effectively function as "get" macros
390  *        When used as L_values, they effectively function as "set" macros
391  */
392 
393 #define QDF_NBUF_CB_PADDR(skb) \
394 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
395 
396 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
397 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
398 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
399 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
400 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
401 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
402 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
403 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
404 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
405 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
406 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
407 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
408 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
409 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
410 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
411 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
412 
413 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
414 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
415 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
416 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
417 
418 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
419 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
420 
421 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
422 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
423 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
424 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
425 
426 #define QDF_NBUF_CB_RX_FTYPE(skb) \
427 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
428 
429 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
430 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
431 
432 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
433 	(((struct qdf_nbuf_cb *) \
434 	((skb)->cb))->u.rx.flag_chfrag_start)
435 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
436 	(((struct qdf_nbuf_cb *) \
437 	((skb)->cb))->u.rx.flag_chfrag_cont)
438 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
439 		(((struct qdf_nbuf_cb *) \
440 		((skb)->cb))->u.rx.flag_chfrag_end)
441 
442 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
443 	(((struct qdf_nbuf_cb *) \
444 	((skb)->cb))->u.rx.flag_da_mcbc)
445 
446 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
447 	(((struct qdf_nbuf_cb *) \
448 	((skb)->cb))->u.rx.flag_da_valid)
449 
450 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
451 	(((struct qdf_nbuf_cb *) \
452 	((skb)->cb))->u.rx.flag_sa_valid)
453 
454 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
455 	(((struct qdf_nbuf_cb *) \
456 	((skb)->cb))->u.rx.flag_retry)
457 
458 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
459 	(((struct qdf_nbuf_cb *) \
460 	((skb)->cb))->u.rx.is_raw_frame)
461 
462 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
463 	(((struct qdf_nbuf_cb *) \
464 	((skb)->cb))->u.rx.tid_val)
465 
466 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
467 	(((struct qdf_nbuf_cb *) \
468 	((skb)->cb))->u.rx.flag_is_frag)
469 
470 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
471 	(((struct qdf_nbuf_cb *) \
472 	((skb)->cb))->u.rx.fcs_err)
473 
474 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
475 	qdf_nbuf_set_state(skb, PACKET_STATE)
476 
477 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
478 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
479 
480 #define QDF_NBUF_CB_TX_FTYPE(skb) \
481 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
482 
483 
484 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
485 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
486 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
487 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
488 
489 /* Tx Flags Accessor Macros*/
490 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
491 	(((struct qdf_nbuf_cb *) \
492 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
493 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
494 	(((struct qdf_nbuf_cb *) \
495 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
496 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
497 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
498 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
499 	(((struct qdf_nbuf_cb *) \
500 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
501 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
502 	(((struct qdf_nbuf_cb *) \
503 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
504 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
505 		(((struct qdf_nbuf_cb *) \
506 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
507 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
508 		(((struct qdf_nbuf_cb *) \
509 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
510 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
511 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
512 
513 #define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
514 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
515 /* End of Tx Flags Accessor Macros */
516 
517 /* Tx trace accessor macros */
518 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
519 	(((struct qdf_nbuf_cb *) \
520 		((skb)->cb))->u.tx.trace.packet_state)
521 
522 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
523 	(((struct qdf_nbuf_cb *) \
524 		((skb)->cb))->u.tx.trace.is_packet_priv)
525 
526 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
527 	(((struct qdf_nbuf_cb *) \
528 		((skb)->cb))->u.tx.trace.packet_track)
529 
530 #define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
531 	(((struct qdf_nbuf_cb *) \
532 		((skb)->cb))->u.tx.trace.to_fw)
533 
534 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
535 		(((struct qdf_nbuf_cb *) \
536 			((skb)->cb))->u.rx.trace.packet_track)
537 
538 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
539 	(((struct qdf_nbuf_cb *) \
540 		((skb)->cb))->u.tx.trace.proto_type)
541 
542 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
543 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
544 
545 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
546 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
547 
548 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
549 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
550 
551 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
552 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
553 
554 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
555 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
556 
557 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
558 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
559 
560 #define QDF_NBUF_CB_SET_BCAST(skb) \
561 	(((struct qdf_nbuf_cb *) \
562 		((skb)->cb))->u.tx.trace.is_bcast = true)
563 
564 #define QDF_NBUF_CB_SET_MCAST(skb) \
565 	(((struct qdf_nbuf_cb *) \
566 		((skb)->cb))->u.tx.trace.is_mcast = true)
567 /* End of Tx trace accessor macros */
568 
569 
570 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
571 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
572 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
573 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
574 
575 /* assume the OS provides a single fragment */
576 #define __qdf_nbuf_get_num_frags(skb)		   \
577 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
578 
579 #define __qdf_nbuf_reset_num_frags(skb) \
580 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
581 
582 /**
583  *   end of nbuf->cb access macros
584  */
585 
586 typedef void (*qdf_nbuf_trace_update_t)(char *);
587 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
588 
589 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
590 
591 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
592 	(QDF_NBUF_CB_PADDR(skb) = paddr)
593 
594 #define __qdf_nbuf_frag_push_head(					\
595 	skb, frag_len, frag_vaddr, frag_paddr)				\
596 	do {					\
597 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
598 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
599 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
600 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
601 	} while (0)
602 
603 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
604 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
605 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
606 
607 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
608 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
609 
610 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
611 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
612 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
613 	 /* assume that the OS only provides a single fragment */	\
614 	 QDF_NBUF_CB_PADDR(skb))
615 
616 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
617 
618 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
619 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
620 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
621 
622 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
623 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
624 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
625 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
626 
627 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
628 	do {								\
629 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
630 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
631 		if (frag_num)						\
632 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
633 							      is_wstrm; \
634 		else					\
635 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
636 							      is_wstrm; \
637 	} while (0)
638 
639 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
640 	do { \
641 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
642 	} while (0)
643 
644 #define __qdf_nbuf_get_vdev_ctx(skb) \
645 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
646 
647 #define __qdf_nbuf_set_tx_ftype(skb, type) \
648 	do { \
649 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
650 	} while (0)
651 
652 #define __qdf_nbuf_get_tx_ftype(skb) \
653 		 QDF_NBUF_CB_TX_FTYPE((skb))
654 
655 
656 #define __qdf_nbuf_set_rx_ftype(skb, type) \
657 	do { \
658 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
659 	} while (0)
660 
661 #define __qdf_nbuf_get_rx_ftype(skb) \
662 		 QDF_NBUF_CB_RX_FTYPE((skb))
663 
664 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
665 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
666 
667 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
668 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
669 
670 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
671 	do { \
672 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
673 	} while (0)
674 
675 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
676 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
677 
678 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
679 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
680 
681 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
682 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
683 
684 #define __qdf_nbuf_set_da_mcbc(skb, val) \
685 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
686 
687 #define __qdf_nbuf_is_da_mcbc(skb) \
688 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
689 
690 #define __qdf_nbuf_set_da_valid(skb, val) \
691 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
692 
693 #define __qdf_nbuf_is_da_valid(skb) \
694 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
695 
696 #define __qdf_nbuf_set_sa_valid(skb, val) \
697 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
698 
699 #define __qdf_nbuf_is_sa_valid(skb) \
700 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
701 
702 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
703 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
704 
705 #define __qdf_nbuf_is_rx_retry_flag(skb) \
706 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
707 
708 #define __qdf_nbuf_set_raw_frame(skb, val) \
709 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
710 
711 #define __qdf_nbuf_is_raw_frame(skb) \
712 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
713 
714 #define __qdf_nbuf_get_tid_val(skb) \
715 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
716 
717 #define __qdf_nbuf_set_tid_val(skb, val) \
718 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
719 
720 #define __qdf_nbuf_set_is_frag(skb, val) \
721 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
722 
723 #define __qdf_nbuf_is_frag(skb) \
724 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
725 
726 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
727 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
728 
729 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
730 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
731 
732 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
733 	do { \
734 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
735 	} while (0)
736 
737 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
738 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
739 
740 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
741 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
742 
743 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
744 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
745 
746 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
747 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
748 
749 #define __qdf_nbuf_trace_get_proto_type(skb) \
750 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
751 
752 #define __qdf_nbuf_data_attr_get(skb)		\
753 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
754 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
755 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
756 
757 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
758 		skb_queue_walk_safe(queue, var, tvar)
759 
760 /**
761  * __qdf_nbuf_num_frags_init() - init extra frags
762  * @skb: sk buffer
763  *
764  * Return: none
765  */
766 static inline
767 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
768 {
769 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
770 }
771 
772 /*
773  * prototypes. Implemented in qdf_nbuf.c
774  */
775 
776 /**
777  * __qdf_nbuf_alloc() - Allocate nbuf
778  * @osdev: Device handle
779  * @size: Netbuf requested size
780  * @reserve: headroom to start with
781  * @align: Align
782  * @prio: Priority
783  * @func: Function name of the call site
784  * @line: line number of the call site
785  *
786  * This allocates an nbuf aligns if needed and reserves some space in the front,
787  * since the reserve is done after alignment the reserve value if being
788  * unaligned will result in an unaligned address.
789  *
790  * Return: nbuf or %NULL if no memory
791  */
792 __qdf_nbuf_t
793 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
794 		 int prio, const char *func, uint32_t line);
795 
796 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
797 				     const char *func, uint32_t line);
798 
799 /**
800  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
801  * @size: Size to be allocated for skb
802  * @reserve: Reserve headroom size
803  * @align: Align data
804  * @func: Function name of the call site
805  * @line: Line number of the callsite
806  *
807  * This API allocates a nbuf and aligns it if needed and reserves some headroom
808  * space after the alignment where nbuf is not allocated from skb recycler pool.
809  *
810  * Return: Allocated nbuf pointer
811  */
812 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
813 					  const char *func, uint32_t line);
814 
815 /**
816  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
817  * @skb: Pointer to network buffer
818  *
819  * if GFP_ATOMIC is overkill then we can check whether its
820  * called from interrupt context and then do it or else in
821  * normal case use GFP_KERNEL
822  *
823  * example     use "in_irq() || irqs_disabled()"
824  *
825  * Return: cloned skb
826  */
827 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
828 
829 void __qdf_nbuf_free(struct sk_buff *skb);
830 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
831 			struct sk_buff *skb, qdf_dma_dir_t dir);
832 void __qdf_nbuf_unmap(__qdf_device_t osdev,
833 			struct sk_buff *skb, qdf_dma_dir_t dir);
/* DMA map/unmap and scatter-gather helpers - implemented in qdf_nbuf.c */
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

/* Packet-type inspection helpers: parse raw frame data to recognize
 * specific protocols/subtypes. The uint8_t *data variants expect a
 * pointer to the start of the (Ethernet) frame payload.
 */
bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
/* Protocol subtype extraction for per-protocol logging/stats */
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);
void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);
void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);
bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);
915 
#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

/* Global nbuf accounting disabled: every hook collapses to a no-op and
 * the count always reads back as zero.
 */
static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif
950 
951 /**
952  * __qdf_to_status() - OS to QDF status conversion
953  * @error : OS error
954  *
955  * Return: QDF status
956  */
957 static inline QDF_STATUS __qdf_to_status(signed int error)
958 {
959 	switch (error) {
960 	case 0:
961 		return QDF_STATUS_SUCCESS;
962 	case ENOMEM:
963 	case -ENOMEM:
964 		return QDF_STATUS_E_NOMEM;
965 	default:
966 		return QDF_STATUS_E_NOSUPPORT;
967 	}
968 }
969 
970 /**
971  * __qdf_nbuf_len() - return the amount of valid data in the skb
972  * @skb: Pointer to network buffer
973  *
974  * This API returns the amount of valid data in the skb, If there are frags
975  * then it returns total length.
976  *
977  * Return: network buffer length
978  */
979 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
980 {
981 	int i, extra_frag_len = 0;
982 
983 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
984 	if (i > 0)
985 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
986 
987 	return extra_frag_len + skb->len;
988 }
989 
990 /**
991  * __qdf_nbuf_cat() - link two nbufs
992  * @dst: Buffer to piggyback into
993  * @src: Buffer to put
994  *
995  * Concat two nbufs, the new buf(src) is piggybacked into the older one.
996  * It is callers responsibility to free the src skb.
997  *
998  * Return: QDF_STATUS (status of the call) if failed the src skb
999  *         is released
1000  */
1001 static inline QDF_STATUS
1002 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
1003 {
1004 	QDF_STATUS error = 0;
1005 
1006 	qdf_assert(dst && src);
1007 
1008 	/*
1009 	 * Since pskb_expand_head unconditionally reallocates the skb->head
1010 	 * buffer, first check whether the current buffer is already large
1011 	 * enough.
1012 	 */
1013 	if (skb_tailroom(dst) < src->len) {
1014 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
1015 		if (error)
1016 			return __qdf_to_status(error);
1017 	}
1018 
1019 	memcpy(skb_tail_pointer(dst), src->data, src->len);
1020 	skb_put(dst, src->len);
1021 	return __qdf_to_status(error);
1022 }
1023 
1024 /*
1025  * nbuf manipulation routines
1026  */
1027 /**
1028  * __qdf_nbuf_headroom() - return the amount of tail space available
1029  * @buf: Pointer to network buffer
1030  *
1031  * Return: amount of tail room
1032  */
1033 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
1034 {
1035 	return skb_headroom(skb);
1036 }
1037 
1038 /**
1039  * __qdf_nbuf_tailroom() - return the amount of tail space available
1040  * @buf: Pointer to network buffer
1041  *
1042  * Return: amount of tail room
1043  */
1044 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
1045 {
1046 	return skb_tailroom(skb);
1047 }
1048 
1049 /**
1050  * __qdf_nbuf_put_tail() - Puts data in the end
1051  * @skb: Pointer to network buffer
1052  * @size: size to be pushed
1053  *
1054  * Return: data pointer of this buf where new data has to be
1055  *         put, or NULL if there is not enough room in this buf.
1056  */
1057 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
1058 {
1059 	if (skb_tailroom(skb) < size) {
1060 		if (unlikely(pskb_expand_head(skb, 0,
1061 			size - skb_tailroom(skb), GFP_ATOMIC))) {
1062 			dev_kfree_skb_any(skb);
1063 			return NULL;
1064 		}
1065 	}
1066 	return skb_put(skb, size);
1067 }
1068 
1069 /**
1070  * __qdf_nbuf_trim_tail() - trim data out from the end
1071  * @skb: Pointer to network buffer
1072  * @size: size to be popped
1073  *
1074  * Return: none
1075  */
1076 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
1077 {
1078 	return skb_trim(skb, skb->len - size);
1079 }
1080 
1081 
1082 /*
1083  * prototypes. Implemented in qdf_nbuf.c
1084  */
1085 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
1086 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
1087 				qdf_nbuf_rx_cksum_t *cksum);
1088 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
1089 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
1090 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
1091 void __qdf_nbuf_ref(struct sk_buff *skb);
1092 int __qdf_nbuf_shared(struct sk_buff *skb);
1093 
1094 /**
1095  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb,
1096  * @skb: sk buff
1097  *
1098  * Return: number of fragments
1099  */
1100 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1101 {
1102 	return skb_shinfo(skb)->nr_frags;
1103 }
1104 
1105 /**
1106  * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
1107  * @skb: sk buff
1108  *
1109  * This API returns a total number of fragments from the fraglist
1110  * Return: total number of fragments
1111  */
1112 static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
1113 {
1114 	uint32_t num_frag = 0;
1115 	struct sk_buff *list = NULL;
1116 
1117 	num_frag = skb_shinfo(skb)->nr_frags;
1118 	skb_walk_frags(skb, list)
1119 		num_frag += skb_shinfo(list)->nr_frags;
1120 
1121 	return num_frag;
1122 }
1123 
1124 /*
1125  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1126  */
1127 #define __qdf_nbuf_pool_delete(osdev)
1128 
1129 /**
1130  * __qdf_nbuf_copy() - returns a private copy of the skb
1131  * @skb: Pointer to network buffer
1132  *
1133  * This API returns a private copy of the skb, the skb returned is completely
1134  *  modifiable by callers
1135  *
1136  * Return: skb or NULL
1137  */
1138 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1139 {
1140 	struct sk_buff *skb_new = NULL;
1141 
1142 	skb_new = skb_copy(skb, GFP_ATOMIC);
1143 	if (skb_new) {
1144 		__qdf_nbuf_count_inc(skb_new);
1145 	}
1146 	return skb_new;
1147 }
1148 
1149 #define __qdf_nbuf_reserve      skb_reserve
1150 
1151 /**
1152  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1153  * @skb: Pointer to network buffer
1154  * @data: data pointer
1155  *
1156  * Return: none
1157  */
1158 static inline void
1159 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1160 {
1161 	skb->data = data;
1162 }
1163 
1164 /**
1165  * __qdf_nbuf_set_len() - set buffer data length
1166  * @skb: Pointer to network buffer
1167  * @len: data length
1168  *
1169  * Return: none
1170  */
1171 static inline void
1172 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1173 {
1174 	skb->len = len;
1175 }
1176 
1177 /**
1178  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1179  * @skb: Pointer to network buffer
1180  * @len: skb data length
1181  *
1182  * Return: none
1183  */
1184 static inline void
1185 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1186 {
1187 	skb_set_tail_pointer(skb, len);
1188 }
1189 
1190 /**
1191  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1192  * @skb: Pointer to network buffer
1193  * @list: list to use
1194  *
1195  * This is a lockless version, driver must acquire locks if it
1196  * needs to synchronize
1197  *
1198  * Return: none
1199  */
1200 static inline void
1201 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1202 {
1203 	__skb_unlink(skb, list);
1204 }
1205 
1206 /**
1207  * __qdf_nbuf_reset() - reset the buffer data and pointer
1208  * @buf: Network buf instance
1209  * @reserve: reserve
1210  * @align: align
1211  *
1212  * Return: none
1213  */
1214 static inline void
1215 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1216 {
1217 	int offset;
1218 
1219 	skb_push(skb, skb_headroom(skb));
1220 	skb_put(skb, skb_tailroom(skb));
1221 	memset(skb->data, 0x0, skb->len);
1222 	skb_trim(skb, 0);
1223 	skb_reserve(skb, NET_SKB_PAD);
1224 	memset(skb->cb, 0x0, sizeof(skb->cb));
1225 
1226 	/*
1227 	 * The default is for netbuf fragments to be interpreted
1228 	 * as wordstreams rather than bytestreams.
1229 	 */
1230 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1231 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1232 
1233 	/*
1234 	 * Align & make sure that the tail & data are adjusted properly
1235 	 */
1236 
1237 	if (align) {
1238 		offset = ((unsigned long)skb->data) % align;
1239 		if (offset)
1240 			skb_reserve(skb, align - offset);
1241 	}
1242 
1243 	skb_reserve(skb, reserve);
1244 }
1245 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
/* Kernels before 4.14 have no skb->dev_scratch field: report unsupported */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

/* dev_scratch unsupported: always reads back as 0 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

/* dev_scratch unsupported: setting the value is a no-op */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
1299 
1300 /**
1301  * __qdf_nbuf_head() - return the pointer the skb's head pointer
1302  * @skb: Pointer to network buffer
1303  *
1304  * Return: Pointer to head buffer
1305  */
1306 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1307 {
1308 	return skb->head;
1309 }
1310 
1311 /**
1312  * __qdf_nbuf_data() - return the pointer to data header in the skb
1313  * @skb: Pointer to network buffer
1314  *
1315  * Return: Pointer to skb data
1316  */
1317 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1318 {
1319 	return skb->data;
1320 }
1321 
1322 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1323 {
1324 	return (uint8_t *)&skb->data;
1325 }
1326 
1327 /**
1328  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1329  * @skb: Pointer to network buffer
1330  *
1331  * Return: skb protocol
1332  */
1333 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1334 {
1335 	return skb->protocol;
1336 }
1337 
1338 /**
1339  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1340  * @skb: Pointer to network buffer
1341  *
1342  * Return: skb ip_summed
1343  */
1344 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1345 {
1346 	return skb->ip_summed;
1347 }
1348 
1349 /**
1350  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1351  * @skb: Pointer to network buffer
1352  * @ip_summed: ip checksum
1353  *
1354  * Return: none
1355  */
1356 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1357 		 uint8_t ip_summed)
1358 {
1359 	skb->ip_summed = ip_summed;
1360 }
1361 
1362 /**
1363  * __qdf_nbuf_get_priority() - return the priority value of the skb
1364  * @skb: Pointer to network buffer
1365  *
1366  * Return: skb priority
1367  */
1368 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1369 {
1370 	return skb->priority;
1371 }
1372 
1373 /**
1374  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1375  * @skb: Pointer to network buffer
1376  * @p: priority
1377  *
1378  * Return: none
1379  */
1380 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1381 {
1382 	skb->priority = p;
1383 }
1384 
1385 /**
1386  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1387  * @skb: Current skb
1388  * @next_skb: Next skb
1389  *
1390  * Return: void
1391  */
1392 static inline void
1393 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1394 {
1395 	skb->next = skb_next;
1396 }
1397 
1398 /**
1399  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1400  * @skb: Current skb
1401  *
1402  * Return: the next skb pointed to by the current skb
1403  */
1404 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1405 {
1406 	return skb->next;
1407 }
1408 
1409 /**
1410  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1411  * @skb: Current skb
1412  * @next_skb: Next skb
1413  *
1414  * This fn is used to link up extensions to the head skb. Does not handle
1415  * linking to the head
1416  *
1417  * Return: none
1418  */
1419 static inline void
1420 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1421 {
1422 	skb->next = skb_next;
1423 }
1424 
1425 /**
1426  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1427  * @skb: Current skb
1428  *
1429  * Return: the next skb pointed to by the current skb
1430  */
1431 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1432 {
1433 	return skb->next;
1434 }
1435 
1436 /**
1437  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1438  * @skb_head: head_buf nbuf holding head segment (single)
1439  * @ext_list: nbuf list holding linked extensions to the head
1440  * @ext_len: Total length of all buffers in the extension list
1441  *
1442  * This function is used to link up a list of packet extensions (seg1, 2,*  ...)
1443  * to the nbuf holding the head segment (seg0)
1444  *
1445  * Return: none
1446  */
1447 static inline void
1448 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1449 			struct sk_buff *ext_list, size_t ext_len)
1450 {
1451 	skb_shinfo(skb_head)->frag_list = ext_list;
1452 	skb_head->data_len += ext_len;
1453 	skb_head->len += ext_len;
1454 }
1455 
1456 /**
1457  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1458  * @head_buf: Network buf holding head segment (single)
1459  *
1460  * This ext_list is populated when we have Jumbo packet, for example in case of
1461  * monitor mode amsdu packet reception, and are stiched using frags_list.
1462  *
1463  * Return: Network buf list holding linked extensions from head buf.
1464  */
1465 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1466 {
1467 	return (skb_shinfo(head_buf)->frag_list);
1468 }
1469 
1470 /**
1471  * __qdf_nbuf_get_age() - return the checksum value of the skb
1472  * @skb: Pointer to network buffer
1473  *
1474  * Return: checksum value
1475  */
1476 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1477 {
1478 	return skb->csum;
1479 }
1480 
1481 /**
1482  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1483  * @skb: Pointer to network buffer
1484  * @v: Value
1485  *
1486  * Return: none
1487  */
1488 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1489 {
1490 	skb->csum = v;
1491 }
1492 
1493 /**
1494  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1495  * @skb: Pointer to network buffer
1496  * @adj: Adjustment value
1497  *
1498  * Return: none
1499  */
1500 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1501 {
1502 	skb->csum -= adj;
1503 }
1504 
1505 /**
1506  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1507  * @skb: Pointer to network buffer
1508  * @offset: Offset value
1509  * @len: Length
1510  * @to: Destination pointer
1511  *
1512  * Return: length of the copy bits for skb
1513  */
1514 static inline int32_t
1515 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1516 {
1517 	return skb_copy_bits(skb, offset, to, len);
1518 }
1519 
1520 /**
1521  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1522  * @skb: Pointer to network buffer
1523  * @len:  Packet length
1524  *
1525  * Return: none
1526  */
1527 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1528 {
1529 	if (skb->len > len) {
1530 		skb_trim(skb, len);
1531 	} else {
1532 		if (skb_tailroom(skb) < len - skb->len) {
1533 			if (unlikely(pskb_expand_head(skb, 0,
1534 				len - skb->len - skb_tailroom(skb),
1535 				GFP_ATOMIC))) {
1536 				QDF_DEBUG_PANIC(
1537 				   "SKB tailroom is lessthan requested length."
1538 				   " tail-room: %u, len: %u, skb->len: %u",
1539 				   skb_tailroom(skb), len, skb->len);
1540 				dev_kfree_skb_any(skb);
1541 			}
1542 		}
1543 		skb_put(skb, (len - skb->len));
1544 	}
1545 }
1546 
1547 /**
1548  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1549  * @skb: Pointer to network buffer
1550  * @protocol: Protocol type
1551  *
1552  * Return: none
1553  */
1554 static inline void
1555 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1556 {
1557 	skb->protocol = protocol;
1558 }
1559 
1560 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1561 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1562 
1563 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1564 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1565 
1566 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1567 				      uint32_t *lo, uint32_t *hi);
1568 
1569 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1570 	struct qdf_tso_info_t *tso_info);
1571 
1572 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1573 			  struct qdf_tso_seg_elem_t *tso_seg,
1574 			  bool is_last_seg);
1575 
#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
/* number of TSO segments the skb would be split into */
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
/* TSO disabled: both helpers degenerate to constant 0 */
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */
1600 
1601 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1602 {
1603 	if (skb_is_gso(skb) &&
1604 		(skb_is_gso_v6(skb) ||
1605 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1606 		return true;
1607 	else
1608 		return false;
1609 }
1610 
/* skb reference-count helpers - implemented in qdf_nbuf.c */
struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
/*
 * NOTE(review): function-like macro - the skb argument is expanded without
 * surrounding parentheses, so pass a plain lvalue, not a complex expression.
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
1660 
1661 /**
1662  * nbuf private buffer routines
1663  */
1664 
1665 /**
1666  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1667  * @skb: Pointer to network buffer
1668  * @addr: Pointer to store header's addr
1669  * @m_len: network buffer length
1670  *
1671  * Return: none
1672  */
1673 static inline void
1674 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1675 {
1676 	*addr = skb->data;
1677 	*len = skb->len;
1678 }
1679 
1680 /**
1681  * typedef struct __qdf_nbuf_queue_t -  network buffer queue
1682  * @head: Head pointer
1683  * @tail: Tail pointer
1684  * @qlen: Queue length
1685  */
1686 typedef struct __qdf_nbuf_qhead {
1687 	struct sk_buff *head;
1688 	struct sk_buff *tail;
1689 	unsigned int qlen;
1690 } __qdf_nbuf_queue_t;
1691 
1692 /******************Functions *************/
1693 
1694 /**
1695  * __qdf_nbuf_queue_init() - initiallize the queue head
1696  * @qhead: Queue head
1697  *
1698  * Return: QDF status
1699  */
1700 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1701 {
1702 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1703 	return QDF_STATUS_SUCCESS;
1704 }
1705 
1706 /**
1707  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1708  * @qhead: Queue head
1709  * @skb: Pointer to network buffer
1710  *
1711  * This is a lockless version, driver must acquire locks if it
1712  * needs to synchronize
1713  *
1714  * Return: none
1715  */
1716 static inline void
1717 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1718 {
1719 	skb->next = NULL;       /*Nullify the next ptr */
1720 
1721 	if (!qhead->head)
1722 		qhead->head = skb;
1723 	else
1724 		qhead->tail->next = skb;
1725 
1726 	qhead->tail = skb;
1727 	qhead->qlen++;
1728 }
1729 
1730 /**
1731  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1732  * @dest: target netbuf queue
1733  * @src:  source netbuf queue
1734  *
1735  * Return: target netbuf queue
1736  */
1737 static inline __qdf_nbuf_queue_t *
1738 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1739 {
1740 	if (!dest)
1741 		return NULL;
1742 	else if (!src || !(src->head))
1743 		return dest;
1744 
1745 	if (!(dest->head))
1746 		dest->head = src->head;
1747 	else
1748 		dest->tail->next = src->head;
1749 
1750 	dest->tail = src->tail;
1751 	dest->qlen += src->qlen;
1752 	return dest;
1753 }
1754 
1755 /**
1756  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
1757  * @qhead: Queue head
1758  * @skb: Pointer to network buffer
1759  *
1760  * This is a lockless version, driver must acquire locks if it needs to
1761  * synchronize
1762  *
1763  * Return: none
1764  */
1765 static inline void
1766 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1767 {
1768 	if (!qhead->head) {
1769 		/*Empty queue Tail pointer Must be updated */
1770 		qhead->tail = skb;
1771 	}
1772 	skb->next = qhead->head;
1773 	qhead->head = skb;
1774 	qhead->qlen++;
1775 }
1776 
1777 static inline struct sk_buff *
1778 __qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
1779 {
1780 	__qdf_nbuf_t tmp_tail, node = NULL;
1781 
1782 	if (qhead->head) {
1783 		tmp_tail = qhead->tail;
1784 		node = qhead->head;
1785 		if (qhead->head == qhead->tail) {
1786 			qhead->head = NULL;
1787 			qhead->tail = NULL;
1788 			return node;
1789 		} else {
1790 			while (tmp_tail != node->next)
1791 			       node = node->next;
1792 			qhead->tail = node;
1793 			return node->next;
1794 		}
1795 	}
1796 	return node;
1797 }
1798 
1799 /**
1800  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1801  * @qhead: Queue head
1802  *
1803  * This is a lockless version. Driver should take care of the locks
1804  *
1805  * Return: skb or NULL
1806  */
1807 static inline
1808 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1809 {
1810 	__qdf_nbuf_t tmp = NULL;
1811 
1812 	if (qhead->head) {
1813 		qhead->qlen--;
1814 		tmp = qhead->head;
1815 		if (qhead->head == qhead->tail) {
1816 			qhead->head = NULL;
1817 			qhead->tail = NULL;
1818 		} else {
1819 			qhead->head = tmp->next;
1820 		}
1821 		tmp->next = NULL;
1822 	}
1823 	return tmp;
1824 }
1825 
1826 /**
1827  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1828  * @qhead: head of queue
1829  *
1830  * Return: NULL if the queue is empty
1831  */
1832 static inline struct sk_buff *
1833 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1834 {
1835 	return qhead->head;
1836 }
1837 
1838 /**
1839  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1840  * @qhead: head of queue
1841  *
1842  * Return: NULL if the queue is empty
1843  */
1844 static inline struct sk_buff *
1845 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1846 {
1847 	return qhead->tail;
1848 }
1849 
1850 /**
1851  * __qdf_nbuf_queue_len() - return the queue length
1852  * @qhead: Queue head
1853  *
1854  * Return: Queue length
1855  */
1856 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1857 {
1858 	return qhead->qlen;
1859 }
1860 
1861 /**
1862  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1863  * @skb: Pointer to network buffer
1864  *
1865  * This API returns the next skb from packet chain, remember the skb is
1866  * still in the queue
1867  *
1868  * Return: NULL if no packets are there
1869  */
1870 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1871 {
1872 	return skb->next;
1873 }
1874 
1875 /**
1876  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1877  * @qhead: Queue head
1878  *
1879  * Return: true if length is 0 else false
1880  */
1881 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1882 {
1883 	return qhead->qlen == 0;
1884 }
1885 
1886 /*
1887  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1888  * Because the queue head will most likely put in some structure,
1889  * we don't use pointer type as the definition.
1890  */
1891 
1892 /*
1893  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1894  * Because the queue head will most likely put in some structure,
1895  * we don't use pointer type as the definition.
1896  */
1897 
/* No-op on Linux: no send-complete flag is tracked on the skb here. */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1902 
1903 /**
1904  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
1905  *        expands the headroom
1906  *        in the data region. In case of failure the skb is released.
1907  * @skb: sk buff
1908  * @headroom: size of headroom
1909  *
1910  * Return: skb or NULL
1911  */
1912 static inline struct sk_buff *
1913 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1914 {
1915 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1916 		dev_kfree_skb_any(skb);
1917 		skb = NULL;
1918 	}
1919 	return skb;
1920 }
1921 
1922 /**
1923  * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
1924  *        exapnds the tailroom
1925  *        in data region. In case of failure it releases the skb.
1926  * @skb: sk buff
1927  * @tailroom: size of tailroom
1928  *
1929  * Return: skb or NULL
1930  */
1931 static inline struct sk_buff *
1932 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1933 {
1934 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1935 		return skb;
1936 	/**
1937 	 * unlikely path
1938 	 */
1939 	dev_kfree_skb_any(skb);
1940 	return NULL;
1941 }
1942 
1943 /**
1944  * __qdf_nbuf_linearize() - skb linearize
1945  * @skb: sk buff
1946  *
1947  * create a version of the specified nbuf whose contents
1948  * can be safely modified without affecting other
1949  * users.If the nbuf is non-linear then this function
1950  * linearize. if unable to linearize returns -ENOMEM on
1951  * success 0 is returned
1952  *
1953  * Return: 0 on Success, -ENOMEM on failure is returned.
1954  */
1955 static inline int
1956 __qdf_nbuf_linearize(struct sk_buff *skb)
1957 {
1958 	return skb_linearize(skb);
1959 }
1960 
1961 /**
1962  * __qdf_nbuf_unshare() - skb unshare
1963  * @skb: sk buff
1964  *
1965  * create a version of the specified nbuf whose contents
1966  * can be safely modified without affecting other
1967  * users.If the nbuf is a clone then this function
1968  * creates a new copy of the data. If the buffer is not
1969  * a clone the original buffer is returned.
1970  *
1971  * Return: skb or NULL
1972  */
1973 static inline struct sk_buff *
1974 __qdf_nbuf_unshare(struct sk_buff *skb)
1975 {
1976 	struct sk_buff *skb_new;
1977 
1978 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1979 
1980 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1981 	if (skb_new)
1982 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1983 
1984 	return skb_new;
1985 }
1986 
1987 /**
1988  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1989  *@buf: sk buff
1990  *
1991  * Return: true/false
1992  */
1993 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1994 {
1995 	return skb_cloned(skb);
1996 }
1997 
/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * No-op on Linux: skbs are allocated on demand from the slab, so
 * there is no pool to set up.
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}
2008 
/*
 * __qdf_nbuf_pool_delete() - delete nbuf pool; expands to nothing on
 * Linux because no pool is created by __qdf_nbuf_pool_init().
 */
#define __qdf_nbuf_pool_delete(osdev)
2013 
2014 /**
2015  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
2016  *        release the skb.
2017  * @skb: sk buff
2018  * @headroom: size of headroom
2019  * @tailroom: size of tailroom
2020  *
2021  * Return: skb or NULL
2022  */
2023 static inline struct sk_buff *
2024 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
2025 {
2026 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
2027 		return skb;
2028 
2029 	dev_kfree_skb_any(skb);
2030 	return NULL;
2031 }
2032 
2033 /**
2034  * __qdf_nbuf_copy_expand() - copy and expand nbuf
2035  * @buf: Network buf instance
2036  * @headroom: Additional headroom to be added
2037  * @tailroom: Additional tailroom to be added
2038  *
2039  * Return: New nbuf that is a copy of buf, with additional head and tailroom
2040  *	or NULL if there is no memory
2041  */
2042 static inline struct sk_buff *
2043 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
2044 {
2045 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
2046 }
2047 
2048 /**
2049  * __qdf_nbuf_has_fraglist() - check buf has fraglist
2050  * @buf: Network buf instance
2051  *
2052  * Return: True, if buf has frag_list else return False
2053  */
2054 static inline bool
2055 __qdf_nbuf_has_fraglist(struct sk_buff *buf)
2056 {
2057 	return skb_has_frag_list(buf);
2058 }
2059 
2060 /**
2061  * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
2062  * @buf: Network buf instance
2063  *
2064  * Return: Network buf instance
2065  */
2066 static inline struct sk_buff *
2067 __qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
2068 {
2069 	struct sk_buff *list;
2070 
2071 	if (!__qdf_nbuf_has_fraglist(buf))
2072 		return NULL;
2073 
2074 	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
2075 		;
2076 
2077 	return list;
2078 }
2079 
2080 /**
2081  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
2082  * @buf: Network buf instance
2083  *
2084  * Return: void
2085  */
2086 static inline void
2087 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
2088 {
2089 	struct sk_buff *list;
2090 
2091 	skb_walk_frags(buf, list)
2092 		skb_get(list);
2093 }
2094 
/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: out-param for the checksum header offset (never written)
 * @where: out-param for the checksum location (never written)
 *
 * Not implemented for Linux: always trips qdf_assert() and reports
 * no checksum info.
 *
 * Return: false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}
2107 
/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Clears the whole skb control block (skb->cb), discarding any state
 * a previous owner stored there.
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}
2118 
2119 /**
2120  * __qdf_nbuf_network_header() - get network header
2121  * @buf: buffer
2122  *
2123  * Return: network header pointer
2124  */
2125 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2126 {
2127 	return skb_network_header(buf);
2128 }
2129 
2130 /**
2131  * __qdf_nbuf_transport_header() - get transport header
2132  * @buf: buffer
2133  *
2134  * Return: transport header pointer
2135  */
2136 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2137 {
2138 	return skb_transport_header(buf);
2139 }
2140 
2141 /**
2142  *  __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
2143  *  passed as part of network buffer by network stack
2144  * @skb: sk buff
2145  *
2146  * Return: TCP MSS size
2147  *
2148  */
2149 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2150 {
2151 	return skb_shinfo(skb)->gso_size;
2152 }
2153 
/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Defined out of line (see qdf_nbuf.c); resets per-buffer state so the
 * same skb can be recycled.
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2161 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void pointer to the skb control block
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}
2173 
2174 /**
2175  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2176  * @skb: sk buff
2177  *
2178  * Return: head size
2179  */
2180 static inline size_t
2181 __qdf_nbuf_headlen(struct sk_buff *skb)
2182 {
2183 	return skb_headlen(skb);
2184 }
2185 
2186 /**
2187  * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not.
2188  * @buf: sk buff
2189  *
2190  * Return: true/false
2191  */
2192 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2193 {
2194 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2195 }
2196 
2197 /**
2198  * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not.
2199  * @buf: sk buff
2200  *
2201  * Return: true/false
2202  */
2203 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2204 {
2205 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2206 }
2207 
2208 /**
2209  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2210  * @skb: sk buff
2211  *
2212  * Return: size of l2+l3+l4 header length
2213  */
2214 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2215 {
2216 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2217 }
2218 
2219 /**
2220  * __qdf_nbuf_get_tcp_hdr_len() - return TCP header length of the skb
2221  * @skb: sk buff
2222  *
2223  * Return: size of TCP header length
2224  */
2225 static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
2226 {
2227 	return tcp_hdrlen(skb);
2228 }
2229 
2230 /**
2231  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2232  * @buf: sk buff
2233  *
2234  * Return:  true/false
2235  */
2236 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2237 {
2238 	if (skb_is_nonlinear(skb))
2239 		return true;
2240 	else
2241 		return false;
2242 }
2243 
2244 /**
2245  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the  skb
2246  * @buf: sk buff
2247  *
2248  * Return: TCP sequence number
2249  */
2250 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2251 {
2252 	return ntohl(tcp_hdr(skb)->seq);
2253 }
2254 
/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 * private space
 * @skb: sk buff
 *
 * NOTE(review): the first 8 bytes of skb->cb appear to be reserved for
 * other users and the private area starts at offset 8 — confirm
 * against the qdf_nbuf cb layout.
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline char *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}
2266 
/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * ORs QDF_MARK_FIRST_WAKEUP_PACKET into skb->mark, preserving any
 * other mark bits already set.
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}
2278 
/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 *
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Thin wrapper over the kernel's skb_record_rx_queue().
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}
2292 
2293 /**
2294  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2295  *
2296  * @buf: sk buff
2297  *
2298  * Return: Queue mapping
2299  */
2300 static inline uint16_t
2301 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2302 {
2303 	return skb->queue_mapping;
2304 }
2305 
/**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 *
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
{
	skb_set_queue_mapping(skb, val);
}
2318 
/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 *
 * @skb: sk buff
 *
 * Stamps the skb with the current kernel time (skb->tstamp).
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}
2331 
2332 /**
2333  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2334  *
2335  * @buf: sk buff
2336  *
2337  * Return: timestamp stored in skb in ms
2338  */
2339 static inline uint64_t
2340 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2341 {
2342 	return ktime_to_ms(skb_get_ktime(skb));
2343 }
2344 
2345 /**
2346  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2347  *
2348  * @buf: sk buff
2349  *
2350  * Return: time difference in ms
2351  */
2352 static inline uint64_t
2353 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2354 {
2355 	return ktime_to_ms(net_timedelta(skb->tstamp));
2356 }
2357 
2358 /**
2359  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2360  *
2361  * @buf: sk buff
2362  *
2363  * Return: time difference in micro seconds
2364  */
2365 static inline uint64_t
2366 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2367 {
2368 	return ktime_to_us(net_timedelta(skb->tstamp));
2369 }
2370 
/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If the buffer currently has an owner, the owner's destructor
 * function is invoked and the skb is detached from its socket.
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	/*
	 * skb_orphan() returns void; 'return skb_orphan(skb);' was a C
	 * constraint violation (C11 6.8.6.4) — call it plainly instead.
	 */
	skb_orphan(skb);
}
2384 
2385 /**
2386  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2387  * head pointer to end pointer
2388  * @nbuf: qdf_nbuf_t
2389  *
2390  * Return: size of network buffer from head pointer to end
2391  * pointer
2392  */
2393 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2394 {
2395 	return skb_end_offset(nbuf);
2396 }
2397 
2398 /**
2399  * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
2400  * including the header and variable data area
2401  * @skb: sk buff
2402  *
2403  * Return: size of network buffer
2404  */
2405 static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
2406 {
2407 	return skb->truesize;
2408 }
2409 
2410 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2411 /**
2412  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2413  * from the total skb mem and DP tx/rx skb mem
2414  * @nbytes: number of bytes
2415  * @dir: direction
2416  * @is_mapped: is mapped or unmapped memory
2417  *
2418  * Return: none
2419  */
2420 static inline void __qdf_record_nbuf_nbytes(
2421 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2422 {
2423 	if (is_mapped) {
2424 		if (dir == QDF_DMA_TO_DEVICE) {
2425 			qdf_mem_dp_tx_skb_cnt_inc();
2426 			qdf_mem_dp_tx_skb_inc(nbytes);
2427 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2428 			qdf_mem_dp_rx_skb_cnt_inc();
2429 			qdf_mem_dp_rx_skb_inc(nbytes);
2430 		}
2431 		qdf_mem_skb_total_inc(nbytes);
2432 	} else {
2433 		if (dir == QDF_DMA_TO_DEVICE) {
2434 			qdf_mem_dp_tx_skb_cnt_dec();
2435 			qdf_mem_dp_tx_skb_dec(nbytes);
2436 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2437 			qdf_mem_dp_rx_skb_cnt_dec();
2438 			qdf_mem_dp_rx_skb_dec(nbytes);
2439 		}
2440 		qdf_mem_skb_total_dec(nbytes);
2441 	}
2442 }
2443 
2444 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
	/* mem-stats accounting disabled: no-op */
}
2449 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2450 
2451 /**
2452  * __qdf_nbuf_map_nbytes_single() - map nbytes
2453  * @osdev: os device
2454  * @buf: buffer
2455  * @dir: direction
2456  * @nbytes: number of bytes
2457  *
2458  * Return: QDF_STATUS
2459  */
2460 #ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	/* simulation host: no real DMA, the virtual data address stands
	 * in for the bus address. NOTE(review): relies on an implicit
	 * pointer -> qdf_dma_addr_t conversion on SIMOS — confirm the
	 * typedef is pointer-compatible there.
	 */
	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
	return QDF_STATUS_SUCCESS;
}
2470 #else
2471 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2472 		qdf_device_t osdev, struct sk_buff *buf,
2473 		qdf_dma_dir_t dir, int nbytes)
2474 {
2475 	qdf_dma_addr_t paddr;
2476 	QDF_STATUS ret;
2477 
2478 	/* assume that the OS only provides a single fragment */
2479 	QDF_NBUF_CB_PADDR(buf) = paddr =
2480 		dma_map_single(osdev->dev, buf->data,
2481 			       nbytes, __qdf_dma_dir_to_os(dir));
2482 	ret =  dma_mapping_error(osdev->dev, paddr) ?
2483 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2484 	if (QDF_IS_STATUS_SUCCESS(ret))
2485 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2486 					 dir, true);
2487 	return ret;
2488 }
2489 #endif
2490 /**
2491  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2492  * @osdev: os device
2493  * @buf: buffer
2494  * @dir: direction
2495  * @nbytes: number of bytes
2496  *
2497  * Return: none
2498  */
2499 #if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	/* simulation host: mapping was virtual, nothing to unmap */
}
2505 
2506 #else
2507 static inline void
2508 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2509 			       qdf_dma_dir_t dir, int nbytes)
2510 {
2511 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2512 
2513 	if (qdf_likely(paddr)) {
2514 		__qdf_record_nbuf_nbytes(
2515 			__qdf_nbuf_get_end_offset(buf), dir, false);
2516 		dma_unmap_single(osdev->dev, paddr, nbytes,
2517 				 __qdf_dma_dir_to_os(dir));
2518 		return;
2519 	}
2520 }
2521 #endif
2522 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the list head
 * @skb_queue_head: skb list
 *
 * Return: dequeued skb, or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	struct sk_buff *skb;

	skb = skb_dequeue(skb_queue_head);
	return skb;
}
2528 
2529 static inline
2530 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2531 {
2532 	return skb_queue_head->qlen;
2533 }
2534 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the list tail
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	/*
	 * skb_queue_tail() returns void; returning its value was a C
	 * constraint violation (C11 6.8.6.4) — call it plainly.
	 */
	skb_queue_tail(skb_queue_head, skb);
}
2541 
/**
 * __qdf_nbuf_queue_head_init() - initialize an skb list head
 * @skb_queue_head: skb list to initialize (lock and empty list)
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	/*
	 * skb_queue_head_init() returns void; returning its value was a
	 * C constraint violation (C11 6.8.6.4) — call it plainly.
	 */
	skb_queue_head_init(skb_queue_head);
}
2547 
/**
 * __qdf_nbuf_queue_head_purge() - free every skb on the list
 * @skb_queue_head: skb list to empty
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	/*
	 * skb_queue_purge() returns void; returning its value was a C
	 * constraint violation (C11 6.8.6.4) — call it plainly.
	 */
	skb_queue_purge(skb_queue_head);
}
2553 
/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
 *
 * Takes the list's spinlock with bottom halves disabled.
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
{
	spin_lock_bh(&skb_queue_head->lock);
}
2565 
/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
{
	spin_unlock_bh(&skb_queue_head->lock);
}
2577 
2578 /**
2579  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2580  * @nbuf: qdf_nbuf_t
2581  * @idx: Index for which frag size is requested
2582  *
2583  * Return: Frag size
2584  */
2585 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2586 							   uint8_t idx)
2587 {
2588 	unsigned int size = 0;
2589 
2590 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2591 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2592 	return size;
2593 }
2594 
2595 /**
2596  * __qdf_nbuf_get_frag_address() - Get nbuf frag address at index idx
2597  * @nbuf: qdf_nbuf_t
2598  * @idx: Index for which frag address is requested
2599  *
2600  * Return: Frag address in success, else NULL
2601  */
2602 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2603 						    uint8_t idx)
2604 {
2605 	__qdf_frag_t frag_addr = NULL;
2606 
2607 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2608 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2609 	return frag_addr;
2610 }
2611 
/**
 * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @size: Size by which frag_size needs to be increased/decreased
 *        +Ve means increase, -Ve means decrease
 * @truesize: truesize delta accounted against the skb
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
						 int size,
						 unsigned int truesize)
{
	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
}
2626 
/**
 * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by offset
 *          and adjust the frag length accordingly.
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @offset: Amount by which the frag page offset should be moved.
 *      +Ve - Move offset forward.
 *      -Ve - Move offset backward.
 *
 * Defined out of line (see qdf_nbuf.c).
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
					    int offset);
2640 
/**
 * __qdf_nbuf_remove_frag() - Remove frag from nbuf
 * @nbuf: nbuf pointer
 * @idx: index of the frag to be removed
 * @truesize: truesize of the frag being removed
 *
 * Defined out of line (see qdf_nbuf.c).
 *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);
/**
 * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer needs to be added in nbuf frag
 * @nbuf: qdf_nbuf_t where frag will be added
 * @offset: Offset in frag to be added to nbuf_frags
 * @frag_len: Frag length
 * @truesize: truesize
 * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per below condition:
 *      1. False: If this frag is being added in any nbuf
 *              for the first time after allocation.
 *      2. True: If frag is already attached part of any
 *              nbuf.
 *
 * It takes ref_count based on boolean flag take_frag_ref
 */
void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
			    int offset, int frag_len,
			    unsigned int truesize, bool take_frag_ref);
2669 
/**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: frag whose reference count is to be incremented
 *
 * Defined out of line (see qdf_nbuf.c).
 *
 * Return: void
 */
void __qdf_nbuf_ref_frag(qdf_frag_t buf);
2676 
/**
 * __qdf_nbuf_set_mark() - Set nbuf mark
 * @buf: Pointer to nbuf
 * @mark: Value to set mark
 *
 * Overwrites skb->mark entirely (unlike __qdf_nbuf_mark_wakeup_frame(),
 * which ORs in a single bit).
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
{
	buf->mark = mark;
}
2688 
2689 /**
2690  * __qdf_nbuf_get_mark() - Get nbuf mark
2691  * @buf: Pointer to nbuf
2692  *
2693  * Return: Value of mark
2694  */
2695 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2696 {
2697 	return buf->mark;
2698 }
2699 
2700 /**
2701  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2702  * the data pointer to the end pointer
2703  * @nbuf: qdf_nbuf_t
2704  *
2705  * Return: size of skb from data pointer to end pointer
2706  */
2707 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2708 {
2709 	return (skb_end_pointer(nbuf) - nbuf->data);
2710 }
2711 
2712 /**
2713  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2714  * @skb: Pointer to network buffer
2715  *
2716  * Return: Return the number of gso segments
2717  */
2718 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2719 {
2720 	return skb_shinfo(skb)->gso_segs;
2721 }
2722 
2723 /**
2724  * __qdf_nbuf_get_gso_size() - Return the number of gso size
2725  * @skb: Pointer to network buffer
2726  *
2727  * Return: Return the number of gso segments
2728  */
2729 static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
2730 {
2731 	return skb_shinfo(skb)->gso_size;
2732 }
2733 
/**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: gso segment size to store
 *
 * Return: None
 */
static inline void
__qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
{
	skb_shinfo(skb)->gso_size = val;
}
2745 
/**
 * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases.
 * Uses kfree_skb(), which is valid in process/softirq context.
 *
 * Return: None
 */
static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
{
	kfree_skb(skb);
}
2758 
/**
 * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases.
 * Uses dev_kfree_skb(), which treats the free as a drop.
 *
 * Return: None
 */
static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
2771 
2772 /**
2773  * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
2774  * @buf: Network buffer
2775  *
2776  * Return: TRUE if skb pkt type is mcast
2777  *         FALSE if not
2778  */
2779 static inline
2780 bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
2781 {
2782 	return skb->pkt_type == PACKET_MULTICAST;
2783 }
2784 
2785 /**
2786  * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
2787  * @buf: Network buffer
2788  *
2789  * Return: TRUE if skb pkt type is mcast
2790  *         FALSE if not
2791  */
2792 static inline
2793 bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
2794 {
2795 	return skb->pkt_type == PACKET_BROADCAST;
2796 }
2797 
/**
 * __qdf_nbuf_set_dev() - set the net device of the network buffer
 * @skb: Pointer to network buffer
 * @dev: net device to attach to the skb
 *
 * Return: void
 */
static inline
void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
2810 
2811 /**
2812  * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
2813  * @buf: Pointer to network buffer
2814  *
2815  * Return: dev mtu value in nbuf
2816  */
2817 static inline
2818 unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
2819 {
2820 	return skb->dev->mtu;
2821 }
2822 
/**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set skb protocol using the
 * eth_type_trans() OS API
 * @skb: Pointer to network buffer
 *
 * Return: None
 */
static inline
void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, skb->dev);
}
2834 
/**
 * __qdf_nbuf_net_timedelta() - get time delta since @t
 * @t: time as __qdf_ktime_t object
 *
 * Return: time delta as ktime_t object
 */
static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
{
	return net_timedelta(t);
}
2845 
2846 #ifdef CONFIG_NBUF_AP_PLATFORM
2847 #include <i_qdf_nbuf_w.h>
2848 #else
2849 #include <i_qdf_nbuf_m.h>
2850 #endif
2851 #endif /*_I_QDF_NET_BUF_H */
2852