/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet and its data, so
 * __qdf_nbuf_t is simply a pointer to struct sk_buff.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
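
/*
 * Illustrative sketch (not part of the API): on 32-bit targets
 * qdf_dma_addr_t may be only 32 bits wide, so overlaying it with a
 * uint64_t forces 64-bit size and alignment inside skb->cb. The
 * invariant can be checked at build time with the same assert macro
 * this file already uses for the cb size:
 *
 *	QDF_COMPILE_TIME_ASSERT(qdf_paddr_size,
 *				sizeof(qdf_paddr_t) == sizeof(uint64_t));
 */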

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistent: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr: physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind: reo destination indication
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed to by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.reserved: reserved
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @tx.trace.is_packet_priv: flag, packet is priv
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: Flag to indicate send this packet to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 reserved : 1;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind:5,
						 reserved:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						reserved:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros for qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */

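/*
 * Illustrative usage sketch (hypothetical driver snippet, not from this
 * file): the same macro serves as both getter and setter because it
 * expands to a plain struct member reference inside skb->cb.
 *
 *	QDF_NBUF_CB_PADDR(skb) = dma_map_single(dev, skb->data,
 *						len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, QDF_NBUF_CB_PADDR(skb), len, DMA_TO_DEVICE);
 */
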
#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/* end of nbuf->cb access macros */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
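
/*
 * Illustrative sketch (hypothetical caller, not from this file): a tx
 * path can prepend one driver-owned fragment (e.g. an HTC/HTT
 * descriptor) that lives outside the skb data buffer:
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_desc_len,
 *				  htt_desc_vaddr, htt_desc_paddr);
 *	num = __qdf_nbuf_get_num_frags(skb);	   // now 2: desc + skb data
 *	vaddr = __qdf_nbuf_get_frag_vaddr(skb, 0); // the descriptor
 */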

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * at the front. Since the reserve is applied after alignment, an
 * unaligned reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocate skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf that bypasses the skb recycler pool, aligns
 * it if needed, and reserves some headroom space after the alignment.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);
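
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * allocate a 2 KB nbuf with 4-byte alignment and 64 bytes of headroom
 * for headers to be pushed later; the func/line arguments feed the nbuf
 * tracking infrastructure:
 *
 *	skb = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *			       __func__, __LINE__);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */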

/**
 * __qdf_nbuf_clone() - clone the nbuf (the copy is read-only)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could check whether this is called from
 * interrupt context and only use it there, falling back to GFP_KERNEL
 * in the normal case, e.g. by testing "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
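
/*
 * A minimal sketch of the context-sensitive allocation suggested above
 * (illustrative only; __qdf_nbuf_clone() itself is implemented in
 * qdf_nbuf.c):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ?
 *		      GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(nbuf, flags);
 */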

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb; if there is an
 * extra frag, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one
 * (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
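
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * since __qdf_nbuf_cat() copies src's bytes into dst, the caller still
 * owns src and must free it afterwards:
 *
 *	if (QDF_IS_STATUS_SUCCESS(__qdf_nbuf_cat(dst, src)))
 *		__qdf_nbuf_free(src);
 */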

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the returned skb is
 * completely modifiable by the caller.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
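
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * dev_scratch can carry a per-packet cookie across layers, but only on
 * kernels that have the field, so callers should gate on the support
 * check:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 */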

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
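
/*
 * Illustrative sketch (hypothetical rx path, not from this file): to
 * stitch a jumbo frame out of three buffers, chain seg1 and seg2 with
 * __qdf_nbuf_set_next_ext() and hang the chain off the head segment:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1,
 *				   seg1->len + seg2->len);
 */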

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception; the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len: Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				   "SKB tailroom is less than requested length."
				   " tail-room: %u, len: %u, skb->len: %u",
				   skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @nbuf:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is a length field and the frame is 802.3; else it is Ethernet
 * Type II (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
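
/*
 * Illustrative usage sketch (hypothetical tx path, not from this file;
 * the initial pkt_type value is an assumption):
 *
 *	enum htt_pkt_type pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify is now 1 for unicast non-VLAN frames, and
 *	// pkt_type was downgraded to htt_pkt_type_ethernet for 802.3
 */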

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}
1652 
1653 /**
1654  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1655  * @qhead: Queue head
1656  * @skb: Pointer to network buffer
1657  *
1658  * This is a lockless version, driver must acquire locks if it
1659  * needs to synchronize
1660  *
1661  * Return: none
1662  */
1663 static inline void
1664 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1665 {
1666 	skb->next = NULL;       /*Nullify the next ptr */
1667 
1668 	if (!qhead->head)
1669 		qhead->head = skb;
1670 	else
1671 		qhead->tail->next = skb;
1672 
1673 	qhead->tail = skb;
1674 	qhead->qlen++;
1675 }
1676 
1677 /**
1678  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1679  * @dest: target netbuf queue
1680  * @src:  source netbuf queue
1681  *
1682  * Return: target netbuf queue
1683  */
1684 static inline __qdf_nbuf_queue_t *
1685 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1686 {
1687 	if (!dest)
1688 		return NULL;
1689 	else if (!src || !(src->head))
1690 		return dest;
1691 
1692 	if (!(dest->head))
1693 		dest->head = src->head;
1694 	else
1695 		dest->tail->next = src->head;
1696 
1697 	dest->tail = src->tail;
1698 	dest->qlen += src->qlen;
1699 	return dest;
1700 }
1701 
1702 /**
1703  * __qdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
1704  * @qhead: Queue head
1705  * @skb: Pointer to network buffer
1706  *
1707  * This is a lockless version, driver must acquire locks if it needs to
1708  * synchronize
1709  *
1710  * Return: none
1711  */
1712 static inline void
1713 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1714 {
1715 	if (!qhead->head) {
1716 		/*Empty queue Tail pointer Must be updated */
1717 		qhead->tail = skb;
1718 	}
1719 	skb->next = qhead->head;
1720 	qhead->head = skb;
1721 	qhead->qlen++;
1722 }
1723 
1724 /**
1725  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1726  * @qhead: Queue head
1727  *
1728  * This is a lockless version. Driver should take care of the locks
1729  *
1730  * Return: skb or NULL
1731  */
1732 static inline
1733 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1734 {
1735 	__qdf_nbuf_t tmp = NULL;
1736 
1737 	if (qhead->head) {
1738 		qhead->qlen--;
1739 		tmp = qhead->head;
1740 		if (qhead->head == qhead->tail) {
1741 			qhead->head = NULL;
1742 			qhead->tail = NULL;
1743 		} else {
1744 			qhead->head = tmp->next;
1745 		}
1746 		tmp->next = NULL;
1747 	}
1748 	return tmp;
1749 }
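
/*
 * Example (illustrative sketch, not part of the API): the queue
 * helpers above are lockless, so any synchronization is the caller's
 * responsibility. A minimal build-and-drain loop, with the
 * hypothetical names "q" and "skb", might look like:
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);		(skb obtained elsewhere)
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		dev_kfree_skb_any(skb);
 */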
1750 
1751 /**
1752  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1753  * @qhead: head of queue
1754  *
1755  * Return: NULL if the queue is empty
1756  */
1757 static inline struct sk_buff *
1758 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1759 {
1760 	return qhead->head;
1761 }
1762 
1763 /**
1764  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1765  * @qhead: head of queue
1766  *
1767  * Return: NULL if the queue is empty
1768  */
1769 static inline struct sk_buff *
1770 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1771 {
1772 	return qhead->tail;
1773 }
1774 
1775 /**
1776  * __qdf_nbuf_queue_len() - return the queue length
1777  * @qhead: Queue head
1778  *
1779  * Return: Queue length
1780  */
1781 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1782 {
1783 	return qhead->qlen;
1784 }
1785 
1786 /**
1787  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1788  * @skb: Pointer to network buffer
1789  *
1790  * This API returns the next skb from packet chain, remember the skb is
1791  * still in the queue
1792  *
1793  * Return: NULL if no packets are there
1794  */
1795 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1796 {
1797 	return skb->next;
1798 }
1799 
1800 /**
1801  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1802  * @qhead: Queue head
1803  *
1804  * Return: true if length is 0 else false
1805  */
1806 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1807 {
1808 	return qhead->qlen == 0;
1809 }
1810 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some
 * structure, we do not use a pointer type for the definition.
 */
1822 
/**
 * __qdf_nbuf_set_send_complete_flag() - set send complete flag (no-op)
 * @skb: sk buff
 * @flag: flag value
 *
 * Return: none
 */
static inline void
1824 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1825 {
1826 }
1827 
1828 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom in the data
 *        region while keeping the skb shell intact. In case of
 *        failure the skb is released.
1832  * @skb: sk buff
1833  * @headroom: size of headroom
1834  *
1835  * Return: skb or NULL
1836  */
1837 static inline struct sk_buff *
1838 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1839 {
1840 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1841 		dev_kfree_skb_any(skb);
1842 		skb = NULL;
1843 	}
1844 	return skb;
1845 }
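
/*
 * Example (illustrative sketch): growing headroom before pushing a
 * driver-specific header. On failure the original skb has already
 * been freed, so only the returned pointer may be used afterwards;
 * "hdr_len" is a hypothetical size.
 *
 *	skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	skb_push(skb, hdr_len);
 */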
1846 
1847 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom in the data
 *        region while keeping the skb shell intact. In case of
 *        failure the skb is released.
1851  * @skb: sk buff
1852  * @tailroom: size of tailroom
1853  *
1854  * Return: skb or NULL
1855  */
1856 static inline struct sk_buff *
1857 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1858 {
1859 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1860 		return skb;
	/* unlikely path */
1864 	dev_kfree_skb_any(skb);
1865 	return NULL;
1866 }
1867 
1868 /**
1869  * __qdf_nbuf_linearize() - skb linearize
1870  * @skb: sk buff
1871  *
 * If the nbuf is non-linear, coalesce its fragments so that the
 * whole payload sits in the linear data area.
 *
 * Return: 0 on success, -ENOMEM on failure
1879  */
1880 static inline int
1881 __qdf_nbuf_linearize(struct sk_buff *skb)
1882 {
1883 	return skb_linearize(skb);
1884 }
1885 
1886 /**
1887  * __qdf_nbuf_unshare() - skb unshare
1888  * @skb: sk buff
1889  *
 * Create a version of the specified nbuf whose contents
 * can be safely modified without affecting other
 * users. If the nbuf is a clone then this function
 * creates a new copy of the data. If the buffer is not
 * a clone the original buffer is returned.
1895  *
1896  * Return: skb or NULL
1897  */
1898 static inline struct sk_buff *
1899 __qdf_nbuf_unshare(struct sk_buff *skb)
1900 {
1901 	struct sk_buff *skb_new;
1902 
1903 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1904 
1905 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1906 	if (skb_new)
1907 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1908 
1909 	return skb_new;
1910 }
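
/*
 * Example (illustrative sketch): unsharing before an in-place payload
 * edit. The passed-in pointer must not be reused, since skb_unshare()
 * may free it and hand back a private copy; "new_hdr" and "hdr_len"
 * are hypothetical.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	memcpy(skb->data, new_hdr, hdr_len);
 */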
1911 
1912 /**
1913  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1915  *
1916  * Return: true/false
1917  */
1918 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1919 {
1920 	return skb_cloned(skb);
1921 }
1922 
1923 /**
1924  * __qdf_nbuf_pool_init() - init pool
1925  * @net: net handle
1926  *
1927  * Return: QDF status
1928  */
1929 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1930 {
1931 	return QDF_STATUS_SUCCESS;
1932 }
1933 
1934 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in Linux
1936  */
1937 #define __qdf_nbuf_pool_delete(osdev)
1938 
1939 /**
1940  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1941  *        release the skb.
1942  * @skb: sk buff
1943  * @headroom: size of headroom
1944  * @tailroom: size of tailroom
1945  *
1946  * Return: skb or NULL
1947  */
1948 static inline struct sk_buff *
1949 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1950 {
1951 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1952 		return skb;
1953 
1954 	dev_kfree_skb_any(skb);
1955 	return NULL;
1956 }
1957 
1958 /**
1959  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1960  * @buf: Network buf instance
1961  * @headroom: Additional headroom to be added
1962  * @tailroom: Additional tailroom to be added
1963  *
1964  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1965  *	or NULL if there is no memory
1966  */
1967 static inline struct sk_buff *
1968 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1969 {
1970 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1971 }
1972 
1973 /**
1974  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
1975  * @buf: Network buf instance
1976  *
1977  * Return: void
1978  */
1979 static inline void
1980 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
1981 {
1982 	struct sk_buff *list;
1983 
1984 	skb_walk_frags(buf, list)
1985 		skb_get(list);
1986 }
1987 
1988 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to store the checksum header offset
 * @where: pointer to store the checksum location
 *
 * Not implemented for this OS abstraction; asserts if called.
 *
1991  * Return: true/false
1992  */
1993 static inline bool
1994 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1995 			 uint8_t **where)
1996 {
1997 	qdf_assert(0);
1998 	return false;
1999 }
2000 
2001 /**
2002  * __qdf_nbuf_reset_ctxt() - mem zero control block
2003  * @nbuf: buffer
2004  *
2005  * Return: none
2006  */
2007 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
2008 {
2009 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
2010 }
2011 
2012 /**
2013  * __qdf_nbuf_network_header() - get network header
2014  * @buf: buffer
2015  *
2016  * Return: network header pointer
2017  */
2018 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2019 {
2020 	return skb_network_header(buf);
2021 }
2022 
2023 /**
2024  * __qdf_nbuf_transport_header() - get transport header
2025  * @buf: buffer
2026  *
2027  * Return: transport header pointer
2028  */
2029 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2030 {
2031 	return skb_transport_header(buf);
2032 }
2033 
2034 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS)
 * passed as part of the network buffer by the network stack
2037  * @skb: sk buff
2038  *
2039  * Return: TCP MSS size
2040  *
2041  */
2042 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2043 {
2044 	return skb_shinfo(skb)->gso_size;
2045 }
2046 
2047 /**
2048  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2049  * @nbuf: sk buff
2050  *
2051  * Return: none
2052  */
2053 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2054 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2057  * @nbuf: sk buff
2058  *
2059  * Return: void ptr
2060  */
2061 static inline void *
2062 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2063 {
2064 	return (void *)nbuf->cb;
2065 }
2066 
2067 /**
2068  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2069  * @skb: sk buff
2070  *
2071  * Return: head size
2072  */
2073 static inline size_t
2074 __qdf_nbuf_headlen(struct sk_buff *skb)
2075 {
2076 	return skb_headlen(skb);
2077 }
2078 
2079 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
2082  *
2083  * Return: true/false
2084  */
2085 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2086 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
2088 }
2089 
2090 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
2093  *
2094  * Return: true/false
2095  */
2096 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2097 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
2099 }
2100 
2101 /**
2102  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2103  * @skb: sk buff
2104  *
2105  * Return: size of l2+l3+l4 header length
2106  */
2107 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2108 {
2109 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2110 }
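
/*
 * Example (illustrative sketch): for a TSO skb, each segment carries
 * gso_size bytes of TCP payload plus one copy of the l2+l3+l4 header,
 * so a rough upper bound on the on-air byte count is:
 *
 *	size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
 *	size_t mss = __qdf_nbuf_tcp_tso_size(skb);
 *	size_t bytes = skb_shinfo(skb)->gso_segs * (hdr_len + mss);
 *
 * The last segment may carry less than a full MSS, hence "upper bound".
 */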
2111 
2112 /**
2113  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
2115  *
2116  * Return:  true/false
2117  */
2118 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2119 {
	return skb_is_nonlinear(skb);
2124 }
2125 
2126 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2129  *
2130  * Return: TCP sequence number
2131  */
2132 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2133 {
2134 	return ntohl(tcp_hdr(skb)->seq);
2135 }
2136 
2137 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 * private space
 * @skb: sk buff
2140  *
2141  * Return: data pointer to typecast into your priv structure
2142  */
2143 static inline uint8_t *
2144 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2145 {
2146 	return &skb->cb[8];
2147 }
2148 
2149 /**
2150  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2151  * @buf: Pointer to nbuf
2152  *
2153  * Return: None
2154  */
2155 static inline void
2156 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2157 {
2158 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2159 }
2160 
2161 /**
2162  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
2165  * @queue_id: Queue id
2166  *
2167  * Return: void
2168  */
2169 static inline void
2170 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2171 {
2172 	skb_record_rx_queue(skb, queue_id);
2173 }
2174 
2175 /**
2176  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
2179  *
2180  * Return: Queue mapping
2181  */
2182 static inline uint16_t
2183 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2184 {
2185 	return skb->queue_mapping;
2186 }
2187 
2188 /**
2189  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
2192  *
2193  * Return: void
2194  */
2195 static inline void
2196 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2197 {
2198 	__net_timestamp(skb);
2199 }
2200 
2201 /**
2202  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
2205  *
2206  * Return: timestamp stored in skb in ms
2207  */
2208 static inline uint64_t
2209 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2210 {
2211 	return ktime_to_ms(skb_get_ktime(skb));
2212 }
2213 
2214 /**
2215  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
2218  *
2219  * Return: time difference in ms
2220  */
2221 static inline uint64_t
2222 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2223 {
2224 	return ktime_to_ms(net_timedelta(skb->tstamp));
2225 }
2226 
2227 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2233  */
2234 static inline uint64_t
2235 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2236 {
2237 	return ktime_to_us(net_timedelta(skb->tstamp));
2238 }
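
/*
 * Example (illustrative sketch): measuring how long a frame spent in
 * the host by stamping it at ingress and reading the delta later;
 * "delay_us" is a hypothetical uint64_t local.
 *
 *	__qdf_nbuf_set_timestamp(skb);			(at ingress)
 *	...
 *	delay_us = __qdf_nbuf_get_timedelta_us(skb);	(at completion)
 */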
2239 
2240 /**
2241  * __qdf_nbuf_orphan() - orphan a nbuf
2242  * @skb: sk buff
2243  *
2244  * If a buffer currently has an owner then we call the
2245  * owner's destructor function
2246  *
2247  * Return: void
2248  */
2249 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2250 {
	skb_orphan(skb);
2252 }
2253 
2254 /**
2255  * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
2256  * head pointer to end pointer
2257  * @nbuf: qdf_nbuf_t
2258  *
2259  * Return: size of network buffer from head pointer to end
2260  * pointer
2261  */
2262 static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
2263 {
2264 	return skb_end_offset(nbuf);
2265 }
2266 
2267 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2268 /**
2269  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2270  * from the total skb mem and DP tx/rx skb mem
2271  * @nbytes: number of bytes
2272  * @dir: direction
2273  * @is_mapped: is mapped or unmapped memory
2274  *
2275  * Return: none
2276  */
2277 static inline void __qdf_record_nbuf_nbytes(
2278 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2279 {
2280 	if (is_mapped) {
2281 		if (dir == QDF_DMA_TO_DEVICE) {
2282 			qdf_mem_dp_tx_skb_cnt_inc();
2283 			qdf_mem_dp_tx_skb_inc(nbytes);
2284 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2285 			qdf_mem_dp_rx_skb_cnt_inc();
2286 			qdf_mem_dp_rx_skb_inc(nbytes);
2287 		}
2288 		qdf_mem_skb_total_inc(nbytes);
2289 	} else {
2290 		if (dir == QDF_DMA_TO_DEVICE) {
2291 			qdf_mem_dp_tx_skb_cnt_dec();
2292 			qdf_mem_dp_tx_skb_dec(nbytes);
2293 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2294 			qdf_mem_dp_rx_skb_cnt_dec();
2295 			qdf_mem_dp_rx_skb_dec(nbytes);
2296 		}
2297 		qdf_mem_skb_total_dec(nbytes);
2298 	}
2299 }
2300 
2301 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2302 static inline void __qdf_record_nbuf_nbytes(
2303 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2304 {
2305 }
2306 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2307 
2308 /**
2309  * __qdf_nbuf_map_nbytes_single() - map nbytes
2310  * @osdev: os device
2311  * @buf: buffer
2312  * @dir: direction
2313  * @nbytes: number of bytes
2314  *
2315  * Return: QDF_STATUS
2316  */
2317 #ifdef A_SIMOS_DEVHOST
2318 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2319 		qdf_device_t osdev, struct sk_buff *buf,
2320 		qdf_dma_dir_t dir, int nbytes)
2321 {
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
2325 	return QDF_STATUS_SUCCESS;
2326 }
2327 #else
2328 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2329 		qdf_device_t osdev, struct sk_buff *buf,
2330 		qdf_dma_dir_t dir, int nbytes)
2331 {
2332 	qdf_dma_addr_t paddr;
2333 	QDF_STATUS ret;
2334 
2335 	/* assume that the OS only provides a single fragment */
2336 	QDF_NBUF_CB_PADDR(buf) = paddr =
2337 		dma_map_single(osdev->dev, buf->data,
2338 			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
2340 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2341 	if (QDF_IS_STATUS_SUCCESS(ret))
2342 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
2343 					 dir, true);
2344 	return ret;
2345 }
2346 #endif
2347 /**
2348  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2349  * @osdev: os device
2350  * @buf: buffer
2351  * @dir: direction
2352  * @nbytes: number of bytes
2353  *
2354  * Return: none
2355  */
2356 #if defined(A_SIMOS_DEVHOST)
2357 static inline void
2358 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2359 			       qdf_dma_dir_t dir, int nbytes)
2360 {
2361 }
2362 
2363 #else
2364 static inline void
2365 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2366 			       qdf_dma_dir_t dir, int nbytes)
2367 {
2368 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2369 
	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
2377 }
2378 #endif
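
/*
 * Example (illustrative sketch): a typical map/unmap pair around a
 * hardware hand-off. The map call caches the bus address in the nbuf
 * control block (QDF_NBUF_CB_PADDR) and, when CONFIG_WLAN_SYSFS_MEM_STATS
 * is set, updates the skb mem accounting; unmap must use the same
 * direction and length. "osdev" and "len" come from the caller.
 *
 *	if (!QDF_IS_STATUS_SUCCESS(__qdf_nbuf_map_nbytes_single(
 *			osdev, skb, QDF_DMA_TO_DEVICE, len)))
 *		return QDF_STATUS_E_FAULT;
 *	(post QDF_NBUF_CB_PADDR(skb) to the hardware ring)
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE, len);
 */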
2379 
2380 static inline struct sk_buff *
2381 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2382 {
2383 	return skb_dequeue(skb_queue_head);
2384 }
2385 
2386 static inline
2387 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2388 {
2389 	return skb_queue_head->qlen;
2390 }
2391 
2392 static inline
2393 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2394 					struct sk_buff *skb)
2395 {
	skb_queue_tail(skb_queue_head, skb);
2397 }
2398 
2399 static inline
2400 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2401 {
	skb_queue_head_init(skb_queue_head);
2403 }
2404 
2405 static inline
2406 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2407 {
	skb_queue_purge(skb_queue_head);
2409 }
2410 
2411 /**
2412  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2414  *
2415  * Return: void
2416  */
2417 static inline
2418 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2419 {
2420 	spin_lock_bh(&skb_queue_head->lock);
2421 }
2422 
2423 /**
2424  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2426  *
2427  * Return: void
2428  */
2429 static inline
2430 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2431 {
2432 	spin_unlock_bh(&skb_queue_head->lock);
2433 }
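
/*
 * Example (illustrative sketch): the lock helpers above let several
 * list operations appear atomic. skb_dequeue() and friends take the
 * list lock internally, so only the unlocked __skb_* variants may be
 * called while holding it; "list", "limit" and "skb" are hypothetical.
 *
 *	__qdf_nbuf_queue_head_lock(&list);
 *	if (__qdf_nbuf_queue_head_qlen(&list) > limit)
 *		skb = __skb_dequeue(&list);
 *	__qdf_nbuf_queue_head_unlock(&list);
 */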
2434 
2435 /**
2436  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2437  * @nbuf: qdf_nbuf_t
2438  * @idx: Index for which frag size is requested
2439  *
2440  * Return: Frag size
2441  */
2442 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2443 							   uint8_t idx)
2444 {
2445 	unsigned int size = 0;
2446 
2447 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2448 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2449 	return size;
2450 }
2451 
2452 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
2454  * @nbuf: qdf_nbuf_t
2455  * @idx: Index for which frag address is requested
2456  *
 * Return: Frag address on success, else NULL
2458  */
2459 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2460 						    uint8_t idx)
2461 {
2462 	__qdf_frag_t frag_addr = NULL;
2463 
2464 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2465 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2466 	return frag_addr;
2467 }
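
/*
 * Example (illustrative sketch): walking the page frags of a
 * non-linear nbuf with the two accessors above; "skb" is a
 * hypothetical non-linear buffer.
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < __qdf_nbuf_get_nr_frags(skb); i++) {
 *		__qdf_frag_t va = __qdf_nbuf_get_frag_addr(skb, i);
 *		unsigned int len = __qdf_nbuf_get_frag_size_by_idx(skb, i);
 *
 *		(process len bytes at va)
 *	}
 */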
2468 
2469 /**
2470  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2471  * @nbuf: qdf_nbuf_t
2472  * @idx: Frag index
2473  * @size: Size by which frag_size needs to be increased/decreased
2474  *        +Ve means increase, -Ve means decrease
2475  * @truesize: truesize
 *
 * Return: none
 */
2477 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2478 						 int size,
2479 						 unsigned int truesize)
2480 {
2481 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2482 }
2483 
2484 /**
 * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by offset
 *          and adjust the frag length accordingly.
2487  * @nbuf: qdf_nbuf_t
2488  * @idx: Frag index
2489  * @offset: Frag page offset should be moved by offset.
2490  *      +Ve - Move offset forward.
2491  *      -Ve - Move offset backward.
2492  *
2493  * Return: QDF_STATUS
2494  */
2495 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2496 					    int offset);
2497 
2498 /**
2499  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer to be added to the nbuf frags
2501  * @nbuf: qdf_nbuf_t where frag will be added
2502  * @offset: Offset in frag to be added to nbuf_frags
2503  * @frag_len: Frag length
2504  * @truesize: truesize
2505  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set as per the below condition:
 *      1. False: If this frag is being added to any nbuf
 *              for the first time after allocation.
 *      2. True: If the frag is already attached as part of
 *              another nbuf.
2511  *
2512  * It takes ref_count based on boolean flag take_frag_ref
2513  */
2514 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2515 			    int offset, int frag_len,
2516 			    unsigned int truesize, bool take_frag_ref);
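
/*
 * Example (illustrative sketch): attaching one frag to two nbufs. The
 * first attach after allocation donates the allocation reference
 * (take_frag_ref = false); every subsequent attach must take its own
 * reference (true) so that each nbuf can be freed independently.
 *
 *	__qdf_nbuf_add_rx_frag(frag, first_nbuf, 0, frag_len,
 *			       truesize, false);
 *	__qdf_nbuf_add_rx_frag(frag, second_nbuf, 0, frag_len,
 *			       truesize, true);
 */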
2517 
2518 /**
2519  * __qdf_nbuf_set_mark() - Set nbuf mark
2520  * @buf: Pointer to nbuf
2521  * @mark: Value to set mark
2522  *
2523  * Return: None
2524  */
2525 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2526 {
2527 	buf->mark = mark;
2528 }
2529 
2530 /**
2531  * __qdf_nbuf_get_mark() - Get nbuf mark
2532  * @buf: Pointer to nbuf
2533  *
2534  * Return: Value of mark
2535  */
2536 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2537 {
2538 	return buf->mark;
2539 }
2540 
2541 /**
2542  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2543  * the data pointer to the end pointer
2544  * @nbuf: qdf_nbuf_t
2545  *
2546  * Return: size of skb from data pointer to end pointer
2547  */
2548 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2549 {
2550 	return (skb_end_pointer(nbuf) - nbuf->data);
2551 }
2552 
2553 /**
2554  * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
2555  * @skb: Pointer to network buffer
2556  *
2557  * Return: Return the number of gso segments
2558  */
2559 static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
2560 {
2561 	return skb_shinfo(skb)->gso_segs;
2562 }
2563 
/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as qdf_ktime_t object
2567  *
2568  * Return: time delta as ktime_t object
2569  */
2570 static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
2571 {
2572 	return net_timedelta(t);
2573 }
2574 
2575 #ifdef CONFIG_NBUF_AP_PLATFORM
2576 #include <i_qdf_nbuf_w.h>
2577 #else
2578 #include <i_qdf_nbuf_m.h>
2579 #endif
#endif /* _I_QDF_NBUF_H */
2581