/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet and its data, so the
 * nbuf wraps a single sk_buff for both purposes.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE    SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily and consistently
 *      calculable: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool flag
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                         (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 (1) CE classification enablement bit
 *                 (2) packet type (802.3 or Ethernet type II)
 *                 (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.reserved: reserved
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                         (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv:
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                       (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reserved:7,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t reserved[3];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * Access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
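
/*
 * Illustrative sketch (not part of the API): because the macros below
 * expand to plain struct member accesses, the same macro works as a
 * "get" (R-value) and as a "set" (L-value), e.g.
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 1;			// set
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);	// get
 */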

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/*
 * end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
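
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * pushing the single extra tx fragment, e.g. an already DMA-mapped
 * HTC/HTT descriptor, in front of the skb data.
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	// frag 0 now refers to the descriptor; frag 1 is skb->data
 *	void *va = __qdf_nbuf_get_frag_vaddr(skb, 0);
 */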

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in
 * the front. Since the reserve is applied after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
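
/*
 * Illustrative sketch, assuming "osdev" is a valid qdf device handle and
 * the sizes are arbitrary: a 2048-byte buffer with 64 bytes of headroom,
 * 4-byte alignment and default priority.
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */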

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the callsite
 *
 * This API allocates an nbuf, aligns it if needed, and reserves headroom
 * after the alignment. The nbuf is not allocated from the skb recycler pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are extra
 * frags, their length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the contents of src are copied into dst. It is the
 * caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS of the call; on failure nothing is copied into dst
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
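
/*
 * Illustrative sketch: concatenating src into dst; freeing src afterwards
 * remains the caller's responsibility.
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */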

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, the caller could check whether it is running
 * in interrupt context (e.g. "in_irq() || irqs_disabled()") and fall back
 * to GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
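
/*
 * Illustrative sketch of the GFP selection suggested above (this is not
 * what __qdf_nbuf_clone() does today; it always uses GFP_ATOMIC):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ? GFP_ATOMIC
 *						    : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */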

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the returned skb is completely
 * modifiable by the caller.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: headroom to reserve
 * @align: alignment of the data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
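
/*
 * Illustrative sketch: callers can use the dev_scratch helpers uniformly
 * and gate on kernel support at run time ("cookie" is hypothetical).
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	// on kernels < 4.14 the setter is a no-op and the getter returns 0
 */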

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. It does not handle
 * linking to the head itself.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, seg2,
 * ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * case of monitor-mode AMSDU packet reception, where the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjusts the tail
 * @skb: Pointer to network buffer
 * @len: Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in the HTT descriptor needs to
 *			be set in case of CE classification support;
 *			it is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and an 802.3 frame, else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
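
/*
 * Illustrative sketch (pkt_type, pkt_subtype and hw_classify are plain
 * caller-side variables; the htt_pkt_type values come from the HTT
 * headers, not from this file):
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */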

/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions ******************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src: source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: the tail pointer must be updated too */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}
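
/*
 * Illustrative sketch of the lockless queue helpers; any locking is the
 * caller's responsibility, and skb1/skb2 are assumed to be valid nbufs.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		__qdf_nbuf_free(skb);
 */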
1725 
1726 /**
1727  * __qdf_nbuf_queue_free() - free a queue
1728  * @qhead: head of queue
1729  *
1730  * Return: QDF status
1731  */
1732 static inline QDF_STATUS
1733 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1734 {
1735 	__qdf_nbuf_t  buf = NULL;
1736 
1737 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1738 		__qdf_nbuf_free(buf);
1739 	return QDF_STATUS_SUCCESS;
1740 }
1741 
1742 
1743 /**
1744  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1745  * @qhead: head of queue
1746  *
1747  * Return: NULL if the queue is empty
1748  */
1749 static inline struct sk_buff *
1750 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1751 {
1752 	return qhead->head;
1753 }
1754 
1755 /**
1756  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1757  * @qhead: head of queue
1758  *
 * Return: last skb in the queue, or NULL if the queue is empty
1760  */
1761 static inline struct sk_buff *
1762 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1763 {
1764 	return qhead->tail;
1765 }
1766 
1767 /**
1768  * __qdf_nbuf_queue_len() - return the queue length
1769  * @qhead: Queue head
1770  *
1771  * Return: Queue length
1772  */
1773 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1774 {
1775 	return qhead->qlen;
1776 }
1777 
1778 /**
1779  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1780  * @skb: Pointer to network buffer
1781  *
 * This API returns the next skb in the packet chain; note that the skb
 * remains in the queue.
 *
 * Return: next skb, or NULL if there are no more packets
1786  */
1787 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1788 {
1789 	return skb->next;
1790 }
1791 
1792 /**
1793  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1794  * @qhead: Queue head
1795  *
1796  * Return: true if length is 0 else false
1797  */
1798 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1799 {
1800 	return qhead->qlen == 0;
1801 }
1802 
/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some structure,
 * we do not use a pointer type for the definition.
 */
1814 
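/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: sk buff
 * @flag: flag value
 *
 * No-op on Linux; present to satisfy the common qdf_nbuf interface.
 *
 * Return: none
 */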
1815 static inline void
1816 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1817 {
1818 }
1819 
1820 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping the
 *	skb shell intact; on failure the skb is released and NULL is
 *	returned
1824  * @skb: sk buff
1825  * @headroom: size of headroom
1826  *
1827  * Return: skb or NULL
1828  */
1829 static inline struct sk_buff *
1830 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1831 {
1832 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1833 		dev_kfree_skb_any(skb);
1834 		skb = NULL;
1835 	}
1836 	return skb;
1837 }
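
/*
 * Usage sketch (illustrative only): on failure the skb has already been
 * freed, so the caller must only use the returned pointer. hdr and
 * hdr_len are hypothetical.
 *
 *	skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;  // original skb already freed
 *	memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 */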
1838 
1839 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping the
 *	skb shell intact; on failure the skb is released and NULL is
 *	returned
1843  * @skb: sk buff
1844  * @tailroom: size of tailroom
1845  *
1846  * Return: skb or NULL
1847  */
1848 static inline struct sk_buff *
1849 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1850 {
1851 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1852 		return skb;
	/* unlikely path */
1856 	dev_kfree_skb_any(skb);
1857 	return NULL;
1858 }
1859 
1860 /**
1861  * __qdf_nbuf_linearize() - skb linearize
1862  * @skb: sk buff
1863  *
 * If the nbuf is non-linear, linearize it by pulling the paged
 * fragments into the linear data area.
 *
 * Return: 0 on success, -ENOMEM on failure
1871  */
1872 static inline int
1873 __qdf_nbuf_linearize(struct sk_buff *skb)
1874 {
1875 	return skb_linearize(skb);
1876 }
1877 
1878 /**
1879  * __qdf_nbuf_unshare() - skb unshare
1880  * @skb: sk buff
1881  *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data; if the buffer is not a clone,
 * the original buffer is returned.
1887  *
1888  * Return: skb or NULL
1889  */
1890 static inline struct sk_buff *
1891 __qdf_nbuf_unshare(struct sk_buff *skb)
1892 {
1893 	return skb_unshare(skb, GFP_ATOMIC);
1894 }
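
/*
 * Usage sketch (illustrative only): skb_unshare() may free the original
 * skb and return a different one, so always use the returned pointer.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	// skb data may now be modified safely
 */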
1895 
1896 /**
1897  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1899  *
1900  * Return: true/false
1901  */
1902 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1903 {
1904 	return skb_cloned(skb);
1905 }
1906 
1907 /**
1908  * __qdf_nbuf_pool_init() - init pool
1909  * @net: net handle
 *
 * No-op on Linux, where nbufs are kernel skbs.
 *
 * Return: QDF status
1912  */
1913 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1914 {
1915 	return QDF_STATUS_SUCCESS;
1916 }
1917 
1918 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in Linux
1920  */
1921 #define __qdf_nbuf_pool_delete(osdev)
1922 
1923 /**
 * __qdf_nbuf_expand() - expand both the headroom and the tailroom; on
 *	failure the skb is released
1926  * @skb: sk buff
1927  * @headroom: size of headroom
1928  * @tailroom: size of tailroom
1929  *
1930  * Return: skb or NULL
1931  */
1932 static inline struct sk_buff *
1933 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1934 {
1935 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1936 		return skb;
1937 
1938 	dev_kfree_skb_any(skb);
1939 	return NULL;
1940 }
1941 
1942 /**
1943  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1944  * @buf: Network buf instance
1945  * @headroom: Additional headroom to be added
1946  * @tailroom: Additional tailroom to be added
1947  *
1948  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1949  *	or NULL if there is no memory
1950  */
1951 static inline struct sk_buff *
1952 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1953 {
1954 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1955 }
1956 
1957 /**
 * __qdf_nbuf_get_ref_fraglist() - take a reference on each skb in the
 *	frag list
1959  * @buf: Network buf instance
1960  *
1961  * Return: void
1962  */
1963 static inline void
1964 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
1965 {
1966 	struct sk_buff *list;
1967 
1968 	skb_walk_frags(buf, list)
1969 		skb_get(list);
1970 }
1971 
1972 /**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: output pointer for the checksum header offset
 * @where: output pointer for the checksum location
 *
 * Not implemented; asserts and returns false if called.
 *
 * Return: true/false
1976  */
1977 static inline bool
1978 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1979 			 uint8_t **where)
1980 {
1981 	qdf_assert(0);
1982 	return false;
1983 }
1984 
1985 /**
1986  * __qdf_nbuf_reset_ctxt() - mem zero control block
1987  * @nbuf: buffer
1988  *
1989  * Return: none
1990  */
1991 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1992 {
1993 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1994 }
1995 
1996 /**
1997  * __qdf_nbuf_network_header() - get network header
1998  * @buf: buffer
1999  *
2000  * Return: network header pointer
2001  */
2002 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
2003 {
2004 	return skb_network_header(buf);
2005 }
2006 
2007 /**
2008  * __qdf_nbuf_transport_header() - get transport header
2009  * @buf: buffer
2010  *
2011  * Return: transport header pointer
2012  */
2013 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
2014 {
2015 	return skb_transport_header(buf);
2016 }
2017 
2018 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 *	passed as part of the network buffer by the network stack
2021  * @skb: sk buff
2022  *
2023  * Return: TCP MSS size
2024  *
2025  */
2026 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2027 {
2028 	return skb_shinfo(skb)->gso_size;
2029 }
2030 
2031 /**
2032  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2033  * @nbuf: sk buff
2034  *
2035  * Return: none
2036  */
2037 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2038 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2041  * @nbuf: sk buff
2042  *
2043  * Return: void ptr
2044  */
2045 static inline void *
2046 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2047 {
2048 	return (void *)nbuf->cb;
2049 }
2050 
2051 /**
2052  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2053  * @skb: sk buff
2054  *
2055  * Return: head size
2056  */
2057 static inline size_t
2058 __qdf_nbuf_headlen(struct sk_buff *skb)
2059 {
2060 	return skb_headlen(skb);
2061 }
2062 
2063 /**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
2065  * @skb: sk buff
2066  *
2067  * Return: number of fragments
2068  */
2069 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
2070 {
2071 	return skb_shinfo(skb)->nr_frags;
2072 }
2073 
2074 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
2077  *
2078  * Return: true/false
2079  */
2080 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2081 {
2082 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2083 }
2084 
2085 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
2088  *
2089  * Return: true/false
2090  */
2091 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2092 {
2093 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2094 }
2095 
2096 /**
2097  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2098  * @skb: sk buff
2099  *
2100  * Return: size of l2+l3+l4 header length
2101  */
2102 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2103 {
2104 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2105 }
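
/*
 * Worked example (illustrative only): for an Ethernet + IPv4 + TCP frame
 * with no options, this yields 14 (L2) + 20 (L3) + 20 (L4) = 54 bytes,
 * since skb_transport_offset() covers L2 + L3 and tcp_hdrlen() covers L4.
 */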
2106 
2107 /**
2108  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
2110  *
2111  * Return:  true/false
2112  */
2113 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2114 {
	return skb_is_nonlinear(skb);
2119 }
2120 
2121 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2124  *
2125  * Return: TCP sequence number
2126  */
2127 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2128 {
2129 	return ntohl(tcp_hdr(skb)->seq);
2130 }
2131 
2132 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 *	private space
 * @skb: sk buff
2135  *
2136  * Return: data pointer to typecast into your priv structure
2137  */
2138 static inline uint8_t *
2139 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2140 {
2141 	return &skb->cb[8];
2142 }
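
/*
 * Usage sketch (illustrative only): the first 8 bytes of skb->cb are
 * reserved, so a caller may overlay a small private structure on the
 * remainder; struct my_priv below is hypothetical.
 *
 *	struct my_priv {
 *		uint32_t magic;
 *		uint32_t flags;
 *	};
 *	struct my_priv *priv =
 *		(struct my_priv *)__qdf_nbuf_get_priv_ptr(skb);
 *
 *	priv->magic = 0xcafe;
 */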
2143 
2144 /**
2145  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2146  * @buf: Pointer to nbuf
2147  *
2148  * Return: None
2149  */
2150 static inline void
2151 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2152 {
2153 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2154 }
2155 
2156 /**
2157  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
2160  * @queue_id: Queue id
2161  *
2162  * Return: void
2163  */
2164 static inline void
2165 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2166 {
2167 	skb_record_rx_queue(skb, queue_id);
2168 }
2169 
2170 /**
2171  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
2174  *
2175  * Return: Queue mapping
2176  */
2177 static inline uint16_t
2178 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2179 {
2180 	return skb->queue_mapping;
2181 }
2182 
2183 /**
 * __qdf_nbuf_set_timestamp() - set the current time as the frame timestamp
 * @skb: sk buff
2187  *
2188  * Return: void
2189  */
2190 static inline void
2191 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2192 {
2193 	__net_timestamp(skb);
2194 }
2195 
2196 /**
2197  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
2200  *
2201  * Return: timestamp stored in skb in ms
2202  */
2203 static inline uint64_t
2204 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2205 {
2206 	return ktime_to_ms(skb_get_ktime(skb));
2207 }
2208 
2209 /**
 * __qdf_nbuf_get_timedelta_ms() - get the time elapsed since the skb
 *	timestamp, in ms
 * @skb: sk buff
2213  *
2214  * Return: time difference in ms
2215  */
2216 static inline uint64_t
2217 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2218 {
2219 	return ktime_to_ms(net_timedelta(skb->tstamp));
2220 }
2221 
2222 /**
 * __qdf_nbuf_get_timedelta_us() - get the time elapsed since the skb
 *	timestamp, in microseconds
 * @skb: sk buff
2226  *
2227  * Return: time difference in micro seconds
2228  */
2229 static inline uint64_t
2230 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2231 {
2232 	return ktime_to_us(net_timedelta(skb->tstamp));
2233 }
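
/*
 * Usage sketch (illustrative only): measuring how long a frame spends in
 * a driver queue.
 *
 *	__qdf_nbuf_set_timestamp(skb);                   // at enqueue
 *	...
 *	latency_us = __qdf_nbuf_get_timedelta_us(skb);   // at dequeue
 */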
2234 
2235 /**
2236  * __qdf_nbuf_orphan() - orphan a nbuf
2237  * @skb: sk buff
2238  *
2239  * If a buffer currently has an owner then we call the
2240  * owner's destructor function
2241  *
2242  * Return: void
2243  */
2244 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2245 {
	skb_orphan(skb);
2247 }
2248 
2249 /**
2250  * __qdf_nbuf_map_nbytes_single() - map nbytes
2251  * @osdev: os device
2252  * @buf: buffer
2253  * @dir: direction
2254  * @nbytes: number of bytes
2255  *
2256  * Return: QDF_STATUS
2257  */
2258 #ifdef A_SIMOS_DEVHOST
2259 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2260 		qdf_device_t osdev, struct sk_buff *buf,
2261 		qdf_dma_dir_t dir, int nbytes)
2262 {
2263 	qdf_dma_addr_t paddr;
2264 
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
2266 	return QDF_STATUS_SUCCESS;
2267 }
2268 #else
2269 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2270 		qdf_device_t osdev, struct sk_buff *buf,
2271 		qdf_dma_dir_t dir, int nbytes)
2272 {
2273 	qdf_dma_addr_t paddr;
2274 
2275 	/* assume that the OS only provides a single fragment */
2276 	QDF_NBUF_CB_PADDR(buf) = paddr =
2277 		dma_map_single(osdev->dev, buf->data,
2278 			       nbytes, __qdf_dma_dir_to_os(dir));
2279 	return dma_mapping_error(osdev->dev, paddr) ?
2280 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2281 }
2282 #endif
2283 /**
2284  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2285  * @osdev: os device
2286  * @buf: buffer
2287  * @dir: direction
2288  * @nbytes: number of bytes
2289  *
2290  * Return: none
2291  */
2292 #if defined(A_SIMOS_DEVHOST)
2293 static inline void
2294 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2295 			       qdf_dma_dir_t dir, int nbytes)
2296 {
2297 }
2298 
2299 #else
2300 static inline void
2301 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2302 			       qdf_dma_dir_t dir, int nbytes)
2303 {
2304 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2305 
	if (qdf_likely(paddr))
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
2311 }
2312 #endif
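
/*
 * Usage sketch (illustrative only): map and unmap must be paired with the
 * same direction and length; QDF_DMA_TO_DEVICE is assumed to be the qdf
 * direction enum value for host-to-device transfers.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	// ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */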
2313 
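/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list from which to dequeue
 *
 * Return: skb, or NULL if the list is empty
 */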
2314 static inline struct sk_buff *
2315 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2316 {
2317 	return skb_dequeue(skb_queue_head);
2318 }
2319 
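/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: length of the skb list
 */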
2320 static inline
2321 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2322 {
2323 	return skb_queue_head->qlen;
2324 }
2325 
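/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the
 *	list
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 *
 * Return: none
 */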
2326 static inline
2327 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2328 					struct sk_buff *skb)
2329 {
	skb_queue_tail(skb_queue_head, skb);
2331 }
2332 
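/**
 * __qdf_nbuf_queue_head_init() - initialize the skb list head
 * @skb_queue_head: skb list to initialize
 *
 * Return: none
 */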
2333 static inline
2334 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2335 {
	skb_queue_head_init(skb_queue_head);
2337 }
2338 
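/**
 * __qdf_nbuf_queue_head_purge() - free all skbs in the list
 * @skb_queue_head: skb list to purge
 *
 * Return: none
 */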
2339 static inline
2340 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2341 {
	skb_queue_purge(skb_queue_head);
2343 }
2344 
2345 /**
2346  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
2348  *
2349  * Return: void
2350  */
2351 static inline
2352 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2353 {
2354 	spin_lock_bh(&skb_queue_head->lock);
2355 }
2356 
2357 /**
2358  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
2360  *
2361  * Return: void
2362  */
2363 static inline
2364 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2365 {
2366 	spin_unlock_bh(&skb_queue_head->lock);
2367 }
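
/*
 * Usage sketch (illustrative only): the lock/unlock pair is needed when
 * several list operations must be atomic; note that skb_dequeue() and
 * friends take the list lock internally, so they must not be called with
 * this lock already held.
 *
 *	__qdf_nbuf_queue_head_lock(&list);
 *	qlen = __qdf_nbuf_queue_head_qlen(&list);
 *	__qdf_nbuf_queue_head_unlock(&list);
 */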
2368 
2369 /**
2370  * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
2371  * @nbuf: qdf_nbuf_t
2372  * @idx: Index for which frag size is requested
2373  *
2374  * Return: Frag size
2375  */
2376 static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
2377 							   uint8_t idx)
2378 {
2379 	unsigned int size = 0;
2380 
2381 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2382 		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
2383 	return size;
2384 }
2385 
2386 /**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
2388  * @nbuf: qdf_nbuf_t
2389  * @idx: Index for which frag address is requested
2390  *
 * Return: Frag address on success, else NULL
2392  */
2393 static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
2394 						    uint8_t idx)
2395 {
2396 	__qdf_frag_t frag_addr = NULL;
2397 
2398 	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
2399 		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
2400 	return frag_addr;
2401 }
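
/*
 * Usage sketch (illustrative only): walking the frags of an skb with the
 * accessors above.
 *
 *	int i;
 *
 *	for (i = 0; i < __qdf_nbuf_get_nr_frags(skb); i++)
 *		pr_debug("frag %d: %u bytes at %pK\n", i,
 *			 __qdf_nbuf_get_frag_size_by_idx(skb, i),
 *			 __qdf_nbuf_get_frag_addr(skb, i));
 */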
2402 
2403 /**
2404  * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
2405  * @nbuf: qdf_nbuf_t
2406  * @idx: Frag index
2407  * @size: Size by which frag_size needs to be increased/decreased
2408  *        +Ve means increase, -Ve means decrease
 * @truesize: truesize
 *
 * Return: none
 */
2411 static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
2412 						 int size,
2413 						 unsigned int truesize)
2414 {
2415 	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
2416 }
2417 
2418 /**
 * __qdf_nbuf_move_frag_page_offset() - Move the frag page_offset by
 *	@offset and adjust the frag length accordingly
2421  * @nbuf: qdf_nbuf_t
2422  * @idx: Frag index
 * @offset: Amount by which the frag page offset should be moved:
 *      +Ve - move the offset forward
 *      -Ve - move the offset backward
2426  *
2427  * Return: QDF_STATUS
2428  */
2429 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
2430 					    int offset);
2431 
2432 /**
2433  * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag to be added to the nbuf frags
2435  * @nbuf: qdf_nbuf_t where frag will be added
2436  * @offset: Offset in frag to be added to nbuf_frags
2437  * @frag_len: Frag length
2438  * @truesize: truesize
2439  * @take_frag_ref: Whether to take ref for frag or not
 *      This bool must be set per the conditions below:
 *      1. False: if this frag is being added to an nbuf
 *              for the first time after allocation
 *      2. True: if the frag is already attached to some
 *              nbuf
 *
 * A reference is taken on the frag based on @take_frag_ref.
2447  */
2448 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
2449 			    int offset, int frag_len,
2450 			    unsigned int truesize, bool take_frag_ref);
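
/*
 * Usage sketch (illustrative only): rx_frag, nbuf2, RX_FRAG_LEN and
 * RX_FRAG_TRUESIZE are hypothetical.
 *
 *	// first attachment after allocation: no extra frag reference
 *	__qdf_nbuf_add_rx_frag(rx_frag, nbuf, 0, RX_FRAG_LEN,
 *			       RX_FRAG_TRUESIZE, false);
 *	// attaching the same frag to a second nbuf: take a reference
 *	__qdf_nbuf_add_rx_frag(rx_frag, nbuf2, 0, RX_FRAG_LEN,
 *			       RX_FRAG_TRUESIZE, true);
 */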
2451 
2452 #ifdef CONFIG_NBUF_AP_PLATFORM
2453 #include <i_qdf_nbuf_w.h>
2454 #else
2455 #include <i_qdf_nbuf_m.h>
2456 #endif
#endif /* _I_QDF_NBUF_H */
2458