/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation
 * of the nbuf: Linux uses sk_buff to represent both the packet and its
 * data, so __qdf_nbuf_t maps directly onto struct sk_buff.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
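
/*
 * Illustrative check (a sketch; "qdf_paddr_size" is a hypothetical assert
 * tag, not part of this header): the union above guarantees the paddr slot
 * in skb->cb spans a full 64 bits even when qdf_dma_addr_t is only 32 bits
 * wide on the target, e.g.:
 *
 *	QDF_COMPILE_TIME_ASSERT(qdf_paddr_size,
 *				sizeof(qdf_paddr_t) == sizeof(uint64_t));
 */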

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistent; do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: flag to indicate packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind: reo destination indication
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.reserved: reserved
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
 * @tx.trace.is_packet_priv: flag (packet is private)
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: Flag to indicate send this packet to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind:5,
						 reserved:2,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						rsvd:7;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						reserved:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/**
 *  access macros to qdf_nbuf_cb
 *  Note: These macros can be used as L-values as well as R-values.
 *        When used as R-values, they effectively function as "get" macros
 *        When used as L-values, they effectively function as "set" macros
 */
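
/*
 * Illustrative usage (a minimal sketch; "nbuf" and "pa" are hypothetical
 * locals) of the dual get/set behavior described above:
 *
 *	qdf_dma_addr_t pa;
 *
 *	QDF_NBUF_CB_PADDR(nbuf) = pa;		// L-value: "set"
 *	pa = QDF_NBUF_CB_PADDR(nbuf);		// R-value: "get"
 *	QDF_NBUF_CB_RX_CTX_ID(nbuf) = 2;	// bit-field "set"
 */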

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
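
/*
 * Illustrative sketch of the single extra-frag model (desc_vaddr,
 * desc_paddr, desc_len, nfrags and vaddr are hypothetical locals): push
 * the tx descriptor as the extra fragment, after which fragment 0 is the
 * descriptor and fragment 1 is the skb payload:
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	nfrags = __qdf_nbuf_get_num_frags(skb);	// 2: extra frag + skb data
 *	vaddr = __qdf_nbuf_get_frag_vaddr(skb, 0);	// desc_vaddr
 *	vaddr = __qdf_nbuf_get_frag_vaddr(skb, 1);	// skb->data
 */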

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in
 * the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf, aligns it if needed, and reserves some
 * headroom space after the alignment. The nbuf is not allocated from the
 * skb recycler pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, then we can check whether it's
 * called from interrupt context and then do it or else in
 * normal case use GFP_KERNEL
 *
 * example     use "in_irq() || irqs_disabled()"
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
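
/*
 * Illustrative mapping (a sketch, no new API assumed):
 *
 *	__qdf_to_status(0)       == QDF_STATUS_SUCCESS
 *	__qdf_to_status(-ENOMEM) == QDF_STATUS_E_NOMEM
 *	__qdf_to_status(-EINVAL) == QDF_STATUS_E_NOSUPPORT (default case)
 */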

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are
 * frags, it returns the total length.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one.
 * It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS (status of the call); on failure the src skb
 *         is not consumed
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
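
/*
 * Illustrative usage (hypothetical skbs "dst" and "src"): on success the
 * payload of src has been copied into dst, and src must still be freed by
 * the caller:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		dev_kfree_skb_any(src);
 */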

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data at the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
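
/*
 * Illustrative usage (hypothetical "hdr"/"hdr_len" locals): reserve space
 * at the tail and copy data in; note the skb is freed internally when
 * expansion fails, so the caller must not touch it on NULL:
 *
 *	tail = __qdf_nbuf_put_tail(skb, hdr_len);
 *	if (!tail)
 *		return;			// skb was already freed
 *	memcpy(tail, hdr, hdr_len);
 */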

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by the caller.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}
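
/*
 * Illustrative usage (a sketch): recycle an skb with 4-byte alignment and
 * 64 bytes of reserved headroom. Since the reserve is applied after the
 * alignment step, skb->data stays aligned only when the reserve value is
 * itself a multiple of the alignment:
 *
 *	__qdf_nbuf_reset(skb, 64, 4);
 */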

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
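
/*
 * Illustrative usage (a sketch; "cookie" is a hypothetical driver value):
 * stash a per-packet cookie in dev_scratch only when the running kernel
 * provides the field:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	...
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		cookie = __qdf_nbuf_get_dev_scratch(skb);
 */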

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
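
/*
 * Illustrative usage (hypothetical "seg0"/"seg1"/"seg2" skbs): link two
 * extension segments behind the head segment of a jumbo frame:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 *	ext = __qdf_nbuf_get_ext_list(seg0);	// returns seg1
 */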

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception; the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), then it is
 * a length and the frame is 802.3; else it is Ethernet Type II (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
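
/*
 * Illustrative invocation (a sketch; the initial pkt_type comes from the
 * caller's default htt_pkt_type value, elided here):
 *
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *	enum htt_pkt_type pkt_type = ...;	// caller's default
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify/pkt_subtype now reflect CE classification
 *	// eligibility; pkt_type was overridden for 802.3 frames
 */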

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}
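
/*
 * Illustrative usage (a lockless sketch; "q", "skb", "skb1" and "skb2"
 * are hypothetical locals, and the caller provides any needed locking):
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		dev_kfree_skb_any(skb);
 */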
1735 
1736 /**
1737  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1738  * @qhead: head of queue
1739  *
1740  * Return: NULL if the queue is empty
1741  */
1742 static inline struct sk_buff *
1743 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1744 {
1745 	return qhead->head;
1746 }
1747 
/**
 * __qdf_nbuf_queue_last() - return the last skb in the queue
 * @qhead: head of queue
 *
 * Return: last skb, or NULL if the queue is empty
 */
1754 static inline struct sk_buff *
1755 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1756 {
1757 	return qhead->tail;
1758 }
1759 
1760 /**
1761  * __qdf_nbuf_queue_len() - return the queue length
1762  * @qhead: Queue head
1763  *
1764  * Return: Queue length
1765  */
1766 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1767 {
1768 	return qhead->qlen;
1769 }
1770 
/**
 * __qdf_nbuf_queue_next() - return the next skb in the packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb in the packet chain; note that the skb
 * itself remains in the queue
 *
 * Return: next skb, or NULL if there are no more packets
 */
1780 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1781 {
1782 	return skb->next;
1783 }
1784 
1785 /**
1786  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1787  * @qhead: Queue head
1788  *
1789  * Return: true if length is 0 else false
1790  */
1791 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1792 {
1793 	return qhead->qlen == 0;
1794 }
1795 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some other
 * structure, we do not use a pointer type for the definition.
 */
1801 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: sk buff
 * @flag: flag value
 *
 * No-op on Linux
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1812 
/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom of the data region
 *        while keeping the skb shell intact. In case of failure the skb
 *        is released.
 * @skb: sk buff
 * @headroom: size of additional headroom
 *
 * Return: skb or NULL
 */
1822 static inline struct sk_buff *
1823 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1824 {
1825 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1826 		dev_kfree_skb_any(skb);
1827 		skb = NULL;
1828 	}
1829 	return skb;
1830 }
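
/*
 * Example (illustrative sketch): ensuring room for an encapsulation
 * header before prepending it. hdr_len is a hypothetical header size.
 * On failure the skb has already been released, so the caller must not
 * touch the old pointer.
 */
static inline struct sk_buff *
__qdf_nbuf_push_hdr_example(struct sk_buff *skb, uint32_t hdr_len)
{
	if (skb_headroom(skb) < hdr_len) {
		skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
		if (!skb)
			return NULL;	/* already freed on failure */
	}
	skb_push(skb, hdr_len);	/* header bytes now start at skb->data */
	return skb;
}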
1831 
/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom of the data region
 *        while keeping the skb shell intact. In case of failure the skb
 *        is released.
 * @skb: sk buff
 * @tailroom: size of additional tailroom
 *
 * Return: skb or NULL
 */
1841 static inline struct sk_buff *
1842 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1843 {
1844 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1845 		return skb;
	/* unlikely path */
1849 	dev_kfree_skb_any(skb);
1850 	return NULL;
1851 }
1852 
/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, pull its fragment data into the linear
 * data area so the payload can be accessed contiguously
 *
 * Return: 0 on success, -ENOMEM if linearization fails
 */
1864  */
1865 static inline int
1866 __qdf_nbuf_linearize(struct sk_buff *skb)
1867 {
1868 	return skb_linearize(skb);
1869 }
1870 
/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data; otherwise the original
 * buffer is returned.
 *
 * Return: skb or NULL
 */
1883 static inline struct sk_buff *
1884 __qdf_nbuf_unshare(struct sk_buff *skb)
1885 {
1886 	struct sk_buff *skb_new;
1887 
1888 	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
1889 
1890 	skb_new = skb_unshare(skb, GFP_ATOMIC);
1891 	if (skb_new)
1892 		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
1893 
1894 	return skb_new;
1895 }
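
/*
 * Example (illustrative sketch): taking exclusive ownership before an
 * in-place payload edit. After __qdf_nbuf_unshare() the old pointer is
 * invalid; on failure the buffer has already been freed.
 */
static inline struct sk_buff *
__qdf_nbuf_edit_payload_example(struct sk_buff *skb)
{
	skb = __qdf_nbuf_unshare(skb);
	if (!skb)
		return NULL;	/* original already freed */
	/* skb->data is now exclusively owned and safe to modify */
	return skb;
}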
1896 
/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
1903 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1904 {
1905 	return skb_cloned(skb);
1906 }
1907 
1908 /**
1909  * __qdf_nbuf_pool_init() - init pool
1910  * @net: net handle
1911  *
1912  * Return: QDF status
1913  */
1914 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1915 {
1916 	return QDF_STATUS_SUCCESS;
1917 }
1918 
/*
 * __qdf_nbuf_pool_delete() implementation - do nothing on Linux
 */
#define __qdf_nbuf_pool_delete(osdev)
1923 
1924 /**
1925  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1926  *        release the skb.
1927  * @skb: sk buff
1928  * @headroom: size of headroom
1929  * @tailroom: size of tailroom
1930  *
1931  * Return: skb or NULL
1932  */
1933 static inline struct sk_buff *
1934 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1935 {
1936 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1937 		return skb;
1938 
1939 	dev_kfree_skb_any(skb);
1940 	return NULL;
1941 }
1942 
1943 /**
1944  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1945  * @buf: Network buf instance
1946  * @headroom: Additional headroom to be added
1947  * @tailroom: Additional tailroom to be added
1948  *
1949  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1950  *	or NULL if there is no memory
1951  */
1952 static inline struct sk_buff *
1953 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1954 {
1955 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1956 }
1957 
1958 /**
1959  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
1960  * @buf: Network buf instance
1961  *
1962  * Return: void
1963  */
1964 static inline void
1965 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
1966 {
1967 	struct sk_buff *list;
1968 
1969 	skb_walk_frags(buf, list)
1970 		skb_get(list);
1971 }
1972 
/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: checksum header offset
 * @where: checksum location
 *
 * Not supported on Linux; asserts if called
 *
 * Return: true/false
 */
1978 static inline bool
1979 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1980 			 uint8_t **where)
1981 {
1982 	qdf_assert(0);
1983 	return false;
1984 }
1985 
1986 /**
1987  * __qdf_nbuf_reset_ctxt() - mem zero control block
1988  * @nbuf: buffer
1989  *
1990  * Return: none
1991  */
1992 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1993 {
1994 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1995 }
1996 
1997 /**
1998  * __qdf_nbuf_network_header() - get network header
1999  * @buf: buffer
2000  *
2001  * Return: network header pointer
2002  */
2003 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2004 {
2005 	return skb_network_header(buf);
2006 }
2007 
2008 /**
2009  * __qdf_nbuf_transport_header() - get transport header
2010  * @buf: buffer
2011  *
2012  * Return: transport header pointer
2013  */
2014 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2015 {
2016 	return skb_transport_header(buf);
2017 }
2018 
/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 *  passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
2027 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2028 {
2029 	return skb_shinfo(skb)->gso_size;
2030 }
2031 
2032 /**
2033  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2034  * @nbuf: sk buff
2035  *
2036  * Return: none
2037  */
2038 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2039 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
2046 static inline void *
2047 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2048 {
2049 	return (void *)nbuf->cb;
2050 }
2051 
2052 /**
2053  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2054  * @skb: sk buff
2055  *
2056  * Return: head size
2057  */
2058 static inline size_t
2059 __qdf_nbuf_headlen(struct sk_buff *skb)
2060 {
2061 	return skb_headlen(skb);
2062 }
2063 
/**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
}
2074 
/**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
}
2085 
2086 /**
2087  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2088  * @skb: sk buff
2089  *
2090  * Return: size of l2+l3+l4 header length
2091  */
2092 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2093 {
2094 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2095 }
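
/*
 * Example (illustrative sketch): gathering the per-segment parameters a
 * TSO-capable target typically needs, using the helpers above. The
 * struct and function names are hypothetical.
 */
struct __qdf_tso_params_example {
	size_t mss;		/* TCP payload bytes per segment */
	size_t hdr_len;		/* L2 + L3 + L4 header bytes */
	bool ipv4;		/* true for SKB_GSO_TCPV4 */
};

static inline void
__qdf_nbuf_fill_tso_params_example(struct sk_buff *skb,
				   struct __qdf_tso_params_example *params)
{
	params->mss = __qdf_nbuf_tcp_tso_size(skb);
	params->hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
	params->ipv4 = __qdf_nbuf_tso_tcp_v4(skb);
}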
2096 
/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb);
}
2110 
/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
2117 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2118 {
2119 	return ntohl(tcp_hdr(skb)->seq);
2120 }
2121 
/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
2128 static inline uint8_t *
2129 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2130 {
2131 	return &skb->cb[8];
2132 }
2133 
2134 /**
2135  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2136  * @buf: Pointer to nbuf
2137  *
2138  * Return: None
2139  */
2140 static inline void
2141 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2142 {
2143 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2144 }
2145 
/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
2154 static inline void
2155 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2156 {
2157 	skb_record_rx_queue(skb, queue_id);
2158 }
2159 
/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
2167 static inline uint16_t
2168 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2169 {
2170 	return skb->queue_mapping;
2171 }
2172 
/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
2180 static inline void
2181 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2182 {
2183 	__net_timestamp(skb);
2184 }
2185 
/**
 * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in ms
 */
2193 static inline uint64_t
2194 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2195 {
2196 	return ktime_to_ms(skb_get_ktime(skb));
2197 }
2198 
/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
2206 static inline uint64_t
2207 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2208 {
2209 	return ktime_to_ms(net_timedelta(skb->tstamp));
2210 }
2211 
/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
 */
2219 static inline uint64_t
2220 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2221 {
2222 	return ktime_to_us(net_timedelta(skb->tstamp));
2223 }
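
/*
 * Example (illustrative sketch): measuring host queueing delay. The
 * buffer is stamped at ingress and checked at delivery; the 20 ms
 * threshold is a hypothetical tuning value.
 */
static inline bool
__qdf_nbuf_is_delayed_example(struct sk_buff *skb)
{
	/* at ingress: __qdf_nbuf_set_timestamp(skb); */
	return __qdf_nbuf_get_timedelta_ms(skb) > 20;
}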
2224 
2225 /**
2226  * __qdf_nbuf_orphan() - orphan a nbuf
2227  * @skb: sk buff
2228  *
2229  * If a buffer currently has an owner then we call the
2230  * owner's destructor function
2231  *
2232  * Return: void
2233  */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}
2238 
2239 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
2240 /**
2241  * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
2242  * from the total skb mem and DP tx/rx skb mem
2243  * @nbytes: number of bytes
2244  * @dir: direction
2245  * @is_mapped: is mapped or unmapped memory
2246  *
2247  * Return: none
2248  */
2249 static inline void __qdf_record_nbuf_nbytes(
2250 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2251 {
2252 	if (is_mapped) {
2253 		if (dir == QDF_DMA_TO_DEVICE) {
2254 			qdf_mem_dp_tx_skb_cnt_inc();
2255 			qdf_mem_dp_tx_skb_inc(nbytes);
2256 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2257 			qdf_mem_dp_rx_skb_cnt_inc();
2258 			qdf_mem_dp_rx_skb_inc(nbytes);
2259 		}
2260 		qdf_mem_skb_total_inc(nbytes);
2261 	} else {
2262 		if (dir == QDF_DMA_TO_DEVICE) {
2263 			qdf_mem_dp_tx_skb_cnt_dec();
2264 			qdf_mem_dp_tx_skb_dec(nbytes);
2265 		} else if (dir == QDF_DMA_FROM_DEVICE) {
2266 			qdf_mem_dp_rx_skb_cnt_dec();
2267 			qdf_mem_dp_rx_skb_dec(nbytes);
2268 		}
2269 		qdf_mem_skb_total_dec(nbytes);
2270 	}
2271 }
2272 
2273 #else /* CONFIG_WLAN_SYSFS_MEM_STATS */
2274 static inline void __qdf_record_nbuf_nbytes(
2275 	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
2276 {
2277 }
2278 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
2279 
2280 /**
2281  * __qdf_nbuf_map_nbytes_single() - map nbytes
2282  * @osdev: os device
2283  * @buf: buffer
2284  * @dir: direction
2285  * @nbytes: number of bytes
2286  *
2287  * Return: QDF_STATUS
2288  */
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)(uintptr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
2299 #else
2300 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2301 		qdf_device_t osdev, struct sk_buff *buf,
2302 		qdf_dma_dir_t dir, int nbytes)
2303 {
2304 	qdf_dma_addr_t paddr;
2305 	QDF_STATUS ret;
2306 
2307 	/* assume that the OS only provides a single fragment */
2308 	QDF_NBUF_CB_PADDR(buf) = paddr =
2309 		dma_map_single(osdev->dev, buf->data,
2310 			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2313 	if (QDF_IS_STATUS_SUCCESS(ret))
2314 		__qdf_record_nbuf_nbytes(nbytes, dir, true);
2315 	return ret;
2316 }
#endif

/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
2327 #if defined(A_SIMOS_DEVHOST)
2328 static inline void
2329 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2330 			       qdf_dma_dir_t dir, int nbytes)
2331 {
2332 }
2333 
2334 #else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(nbytes, dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
}
2348 #endif
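
/*
 * Example (illustrative sketch): the expected map/post/unmap pairing
 * around a hardware hand-off. hw_post_buffer() is a hypothetical
 * placeholder for queuing the physical address to the target.
 */
static inline QDF_STATUS
__qdf_nbuf_tx_dma_example(qdf_device_t osdev, struct sk_buff *skb,
			  int nbytes)
{
	QDF_STATUS status;

	status = __qdf_nbuf_map_nbytes_single(osdev, skb,
					      QDF_DMA_TO_DEVICE, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* hw_post_buffer(QDF_NBUF_CB_PADDR(skb), nbytes); */

	/* once the target is done with the buffer */
	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
				       nbytes);
	return QDF_STATUS_SUCCESS;
}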
2349 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}
2355 
/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: list length
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}
2361 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the list
 * @skb_queue_head: skb list
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}
2368 
/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list
 * @skb_queue_head: skb list
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}
2374 
/**
 * __qdf_nbuf_queue_head_purge() - purge the skb list, freeing all buffers
 * @skb_queue_head: skb list
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
2380 
/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
 *
 * Return: void
 */
2387 static inline
2388 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2389 {
2390 	spin_lock_bh(&skb_queue_head->lock);
2391 }
2392 
/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
 *
 * Return: void
 */
2399 static inline
2400 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2401 {
2402 	spin_unlock_bh(&skb_queue_head->lock);
2403 }
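
/*
 * Example (illustrative sketch): using the explicit list lock to make a
 * read-check-act sequence atomic. Single operations such as
 * skb_queue_tail() already lock internally; note the unlocked
 * __skb_dequeue() variant is used here because the lock is already held.
 * The _example name and watermark parameter are hypothetical.
 */
static inline struct sk_buff *
__qdf_nbuf_dequeue_if_backlogged_example(struct sk_buff_head *q,
					 uint32_t watermark)
{
	struct sk_buff *skb = NULL;

	__qdf_nbuf_queue_head_lock(q);
	if (__qdf_nbuf_queue_head_qlen(q) > watermark)
		skb = __skb_dequeue(q);
	__qdf_nbuf_queue_head_unlock(q);
	return skb;
}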
2404 
2405 /**
2406  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2407  * @nbuf: qdf_nbuf_t
2408  * @idx: Index for which frag size is requested
2409  *
2410  * Return: Frag size
2411  */
2412 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2413 							   uint8_t idx)
2414 {
2415 	unsigned int size = 0;
2416 
2417 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2418 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2419 	return size;
2420 }
2421 
/**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
 */
2429 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2430 						    uint8_t idx)
2431 {
2432 	__qdf_frag_t frag_addr = NULL;
2433 
2434 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2435 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2436 	return frag_addr;
2437 }
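
/*
 * Example (illustrative sketch): walking the page fragments of a
 * non-linear buffer with the index-based accessors. The callback is a
 * hypothetical per-fragment consumer.
 */
static inline void
__qdf_nbuf_walk_frags_example(__qdf_nbuf_t nbuf,
			      void (*cb)(__qdf_frag_t addr,
					 unsigned int len))
{
	uint8_t idx;

	for (idx = 0; idx < __qdf_nbuf_get_nr_frags(nbuf); idx++)
		cb(__qdf_nbuf_get_frag_addr(nbuf, idx),
		   __qdf_nbuf_get_frag_size_by_idx(nbuf, idx));
}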
2438 
/**
 * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @size: Size by which frag_size needs to be increased/decreased
 *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2447 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2448 						 int size,
2449 						 unsigned int truesize)
2450 {
2451 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2452 }
2453 
2454 /**
2455  * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
2456  *          and adjust length by size.
2457  * @nbuf: qdf_nbuf_t
2458  * @idx: Frag index
2459  * @offset: Frag page offset should be moved by offset.
2460  *      +Ve - Move offset forward.
2461  *      -Ve - Move offset backward.
2462  *
2463  * Return: QDF_STATUS
2464  */
2465 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2466 					    int offset);
2467 
/**
 * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer to be added to the nbuf frags
 * @nbuf: qdf_nbuf_t where frag will be added
 * @offset: Offset in frag to be added to nbuf_frags
 * @frag_len: Frag length
 * @truesize: truesize
 * @take_frag_ref: Whether to take a ref for the frag or not
 *      This bool must be set as per the below condition:
 *      1. False: If this frag is being added to an nbuf
 *              for the first time after allocation.
 *      2. True: If the frag is already attached to some
 *              nbuf.
 *
 * A frag reference is taken based on the take_frag_ref flag
 *
 * Return: none
 */
2484 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2485 			    int offset, int frag_len,
2486 			    unsigned int truesize, bool take_frag_ref);
2487 
2488 /**
2489  * __qdf_nbuf_set_mark() - Set nbuf mark
2490  * @buf: Pointer to nbuf
2491  * @mark: Value to set mark
2492  *
2493  * Return: None
2494  */
2495 static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
2496 {
2497 	buf->mark = mark;
2498 }
2499 
2500 /**
2501  * __qdf_nbuf_get_mark() - Get nbuf mark
2502  * @buf: Pointer to nbuf
2503  *
2504  * Return: Value of mark
2505  */
2506 static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
2507 {
2508 	return buf->mark;
2509 }
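
/*
 * Example (illustrative sketch): using the mark to tag a buffer for a
 * policy decision later in the pipeline. The bit value below is
 * hypothetical; QDF_MARK_FIRST_WAKEUP_PACKET is the in-tree precedent.
 */
#define __QDF_NBUF_MARK_EXAMPLE_DROP 0x00000001

static inline bool
__qdf_nbuf_is_drop_marked_example(__qdf_nbuf_t buf)
{
	return !!(__qdf_nbuf_get_mark(buf) & __QDF_NBUF_MARK_EXAMPLE_DROP);
}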
2510 
2511 /**
2512  * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
2513  * the data pointer to the end pointer
2514  * @nbuf: qdf_nbuf_t
2515  *
2516  * Return: size of skb from data pointer to end pointer
2517  */
2518 static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
2519 {
2520 	return (skb_end_pointer(nbuf) - nbuf->data);
2521 }
2522 
2523 #ifdef CONFIG_NBUF_AP_PLATFORM
2524 #include <i_qdf_nbuf_w.h>
2525 #else
2526 #include <i_qdf_nbuf_m.h>
2527 #endif
#endif /* _I_QDF_NBUF_H */
2529