/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet descriptor and its data,
 * so a single sk_buff pointer serves as the nbuf handle.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculatable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_pt" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by application for
 *				received packet type
 * @rx.dev.priv_cb_w.reserved1: reserved for the flow tag set by the
 *				application for the received 5-tuple
 *
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @rx.dev.priv_cb_m.peer_local_id: peer_local_id for RX pkt
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.peer_cached_buf_frm: peer cached buffer
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.flush_ind: flush indication
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of the extra fragment pointed to by tx.vaddr/tx.paddr
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.flag_notify_comp: flag to notify on tx completion
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag, packet is private
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t reserved1;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 reserved:15,
						 peer_local_id:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				peer_cached_buf_frm:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				flush_ind:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				packet_buff_pool:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint8_t ftype;
			uint8_t is_raw_frame:1,
				fcs_err:1,
				tid_val:4,
				reserved:2;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t reserved[3];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl*/
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));

/**
 *  access macros to qdf_nbuf_cb
 *  Note: These macros can be used as L-values as well as R-values.
 *        When used as R-values, they effectively function as "get" macros
 *        When used as L-values, they effectively function as "set" macros
 */

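/*
 * Example (illustrative only): because these macros expand to plain
 * struct member accesses, the same macro serves as both accessor and
 * mutator:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;              // "set" (L-value)
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);    // "get" (R-value)
 */
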
#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flush_ind)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.packet_buff_pool)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/**
 *   end of nbuf->cb access macros
 */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
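
/*
 * Illustrative usage (hypothetical names): push the tx descriptor as the
 * single extra fragment, assuming desc_vaddr/desc_paddr describe an
 * already DMA-mapped HTC/HTT header of desc_len bytes:
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now reports 2: descriptor + skb data
 */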

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in
 * the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
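
/*
 * Illustrative call (a sketch, not from this file): allocate a 2KB buffer
 * with 4-byte alignment and no extra headroom beyond the default:
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 0, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */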

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);
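
/*
 * Typical pairing of the map/unmap prototypes above (a sketch; error
 * handling shortened). Map before handing the buffer to hardware, unmap
 * in the completion path:
 *
 *	if (__qdf_nbuf_map_single(osdev, skb, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	// QDF_NBUF_CB_PADDR(skb) now holds the DMA address
 *	...
 *	__qdf_nbuf_unmap_single(osdev, skb, QDF_DMA_TO_DEVICE);
 */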

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If an extra frag
 * is present, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one
 * (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS of the call. On failure the src skb is left
 *         untouched and still belongs to the caller.
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
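
/*
 * Example (sketch): append src onto dst; since __qdf_nbuf_cat() copies
 * rather than links the data, free src only after a successful call:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */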

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
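
/*
 * Example (sketch): reserve 4 bytes at the tail for a trailer; a NULL
 * return means the expansion failed and the skb was already freed
 * (trailer is a hypothetical source buffer):
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;
 *	memcpy(tail, trailer, 4);
 */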

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}


/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether the caller is
 * in interrupt context (e.g. "in_irq() || irqs_disabled()") and fall back
 * to GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
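
/*
 * Choosing between the two (informal note): __qdf_nbuf_clone() shares the
 * data buffer, so it is cheap but must be treated as read-only, while
 * __qdf_nbuf_copy() duplicates the data and is safe to modify:
 *
 *	struct sk_buff *tap = __qdf_nbuf_clone(skb);   // shared, read-only
 *	struct sk_buff *own = __qdf_nbuf_copy(skb);    // private, writable
 */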

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
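
/*
 * Example (sketch): stash a per-packet cookie across a queue boundary,
 * guarding on kernel support so pre-4.14 builds degrade gracefully
 * (cookie is a hypothetical caller-owned pointer):
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, (unsigned long)cookie);
 */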

/**
 * __qdf_nbuf_head() - return the pointer to the skb's head
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
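
/*
 * Example (sketch): stitch two continuation segments onto a head segment,
 * e.g. while reassembling a jumbo/AMSDU frame:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 *	// seg0->len now covers all three segments
 */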

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception; the segments are
 * stitched together using frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
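
/*
 * Example (sketch): how a tx path might consume this macro. The initial
 * htt_pkt_type_eth2 value is illustrative and assumes the htt_pkt_type
 * enum from the HTT interface headers:
 *
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *	enum htt_pkt_type pkt_type = htt_pkt_type_eth2;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */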

/**
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t -  network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/******************Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /*Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/*Empty queue Tail pointer Must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}
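
/*
 * Example (sketch): lockless produce/consume with the queue helpers; any
 * locking is the caller's responsibility (deliver() is hypothetical):
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *buf;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	while ((buf = __qdf_nbuf_queue_remove(&q)))
 *		deliver(buf);
 */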

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}


/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_last() - returns the last skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
{
	return qhead->tail;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from packet chain, remember the skb is
 * still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use pointer type as the definition.
 */
1745 
1746 static inline void
1747 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1748 {
1749 }
1750 
1751 /**
1752  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
1753  *        expands the headroom
1754  *        in the data region. In case of failure the skb is released.
1755  * @skb: sk buff
1756  * @headroom: size of headroom
1757  *
1758  * Return: skb or NULL
1759  */
1760 static inline struct sk_buff *
1761 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1762 {
1763 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1764 		dev_kfree_skb_any(skb);
1765 		skb = NULL;
1766 	}
1767 	return skb;
1768 }
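
/*
 * Example (illustrative sketch): since the skb is freed on failure,
 * always replace the caller's pointer with the return value and check
 * it before further use; EXTRA_HDR_LEN is a hypothetical size.
 *
 *	skb = __qdf_nbuf_realloc_headroom(skb, EXTRA_HDR_LEN);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */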
1769 
1770 /**
1771  * __qdf_nbuf_realloc_tailroom() - expand the tailroom of the data
1772  *        region while keeping the skb shell intact. On failure the
1773  *        skb is released and NULL is returned.
1774  * @skb: sk buff
1775  * @tailroom: size of tailroom
1776  *
1777  * Return: skb or NULL
1778  */
1779 static inline struct sk_buff *
1780 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1781 {
1782 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1783 		return skb;
1784 	/* unlikely path: expansion failed, free the skb */
1787 	dev_kfree_skb_any(skb);
1788 	return NULL;
1789 }
1790 
1791 /**
1792  * __qdf_nbuf_linearize() - skb linearize
1793  * @skb: sk buff
1794  *
1795  * If the nbuf is non-linear, pull its fragmented data into the
1796  * linear data area so the contents can be accessed as a single
1797  * contiguous buffer.
1798  *
1799  * Return: 0 on success, -ENOMEM on failure.
1802  */
1803 static inline int
1804 __qdf_nbuf_linearize(struct sk_buff *skb)
1805 {
1806 	return skb_linearize(skb);
1807 }
1808 
1809 /**
1810  * __qdf_nbuf_unshare() - skb unshare
1811  * @skb: sk buff
1812  *
1813  * create a version of the specified nbuf whose contents
1814  * can be safely modified without affecting other
1815  * users. If the nbuf is a clone, this function
1816  * creates a new copy of the data. If the buffer is not
1817  * a clone, the original buffer is returned.
1818  *
1819  * Return: skb or NULL
1820  */
1821 static inline struct sk_buff *
1822 __qdf_nbuf_unshare(struct sk_buff *skb)
1823 {
1824 	return skb_unshare(skb, GFP_ATOMIC);
1825 }
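
/*
 * Example (illustrative sketch): when the nbuf is a clone, the
 * underlying skb_unshare() releases the original reference even if the
 * copy fails, so the old pointer must not be used after this call.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */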
1826 
1827 /**
1828  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1829  * @skb: sk buff
1830  *
1831  * Return: true/false
1832  */
1833 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1834 {
1835 	return skb_cloned(skb);
1836 }
1837 
1838 /**
1839  * __qdf_nbuf_pool_init() - init pool
1840  * @net: net handle
1841  *
1842  * Return: QDF status
1843  */
1844 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1845 {
1846 	return QDF_STATUS_SUCCESS;
1847 }
1848 
1849 /*
1850  * __qdf_nbuf_pool_delete() implementation - do nothing in Linux
1851  */
1852 #define __qdf_nbuf_pool_delete(osdev)
1853 
1854 /**
1855  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1856  *        release the skb.
1857  * @skb: sk buff
1858  * @headroom: size of headroom
1859  * @tailroom: size of tailroom
1860  *
1861  * Return: skb or NULL
1862  */
1863 static inline struct sk_buff *
1864 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1865 {
1866 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1867 		return skb;
1868 
1869 	dev_kfree_skb_any(skb);
1870 	return NULL;
1871 }
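
/*
 * Example (illustrative sketch): grow both ends in a single
 * reallocation instead of separate headroom/tailroom calls; the sizes
 * below are hypothetical.
 *
 *	skb = __qdf_nbuf_expand(skb, 64, 128);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */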
1872 
1873 /**
1874  * __qdf_nbuf_tx_cksum_info() - tx checksum info
1875  *
1876  * Return: true/false
1877  */
1878 static inline bool
1879 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1880 			 uint8_t **where)
1881 {
1882 	qdf_assert(0);
1883 	return false;
1884 }
1885 
1886 /**
1887  * __qdf_nbuf_reset_ctxt() - mem zero control block
1888  * @nbuf: buffer
1889  *
1890  * Return: none
1891  */
1892 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1893 {
1894 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1895 }
1896 
1897 /**
1898  * __qdf_nbuf_network_header() - get network header
1899  * @buf: buffer
1900  *
1901  * Return: network header pointer
1902  */
1903 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1904 {
1905 	return skb_network_header(buf);
1906 }
1907 
1908 /**
1909  * __qdf_nbuf_transport_header() - get transport header
1910  * @buf: buffer
1911  *
1912  * Return: transport header pointer
1913  */
1914 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1915 {
1916 	return skb_transport_header(buf);
1917 }
1918 
1919 /**
1920  * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
1921  * passed as part of the network buffer by the network stack
1922  * @skb: sk buff
1923  *
1924  * Return: TCP MSS size
1925  *
1926  */
1927 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1928 {
1929 	return skb_shinfo(skb)->gso_size;
1930 }
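
/*
 * Example (illustrative sketch): estimate how many segments a TSO skb
 * will be split into; "payload_len" and "num_segs" are hypothetical
 * caller-computed values, and DIV_ROUND_UP is the kernel helper.
 *
 *	num_segs = DIV_ROUND_UP(payload_len,
 *				__qdf_nbuf_tcp_tso_size(skb));
 */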
1931 
1932 /**
1933  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1934  * @nbuf: sk buff
1935  *
1936  * Return: none
1937  */
1938 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1939 
1940 /**
1941  * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1942  * @nbuf: sk buff
1943  *
1944  * Return: void ptr
1945  */
1946 static inline void *
1947 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
1948 {
1949 	return (void *)nbuf->cb;
1950 }
1951 
1952 /**
1953  * __qdf_nbuf_headlen() - return the length of the linear buffer of the skb
1954  * @skb: sk buff
1955  *
1956  * Return: head size
1957  */
1958 static inline size_t
1959 __qdf_nbuf_headlen(struct sk_buff *skb)
1960 {
1961 	return skb_headlen(skb);
1962 }
1963 
1964 /**
1965  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1966  * @skb: sk buff
1967  *
1968  * Return: number of fragments
1969  */
1970 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1971 {
1972 	return skb_shinfo(skb)->nr_frags;
1973 }
1974 
1975 /**
1976  * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
1977  * @skb: sk buff
1978  *
1979  * Return: true/false
1980  */
1981 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
1982 {
1983 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
1984 }
1985 
1986 /**
1987  * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
1988  * @skb: sk buff
1989  *
1990  * Return: true/false
1991  */
1992 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
1993 {
1994 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
1995 }
1996 
1997 /**
1998  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
1999  * @skb: sk buff
2000  *
2001  * Return: size of l2+l3+l4 header length
2002  */
2003 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2004 {
2005 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2006 }
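
/*
 * Example (illustrative sketch): for a linear TCP skb, the payload
 * length is everything past the combined L2+L3+L4 headers;
 * "payload_len" is a hypothetical caller variable.
 *
 *	payload_len = skb->len - __qdf_nbuf_l2l3l4_hdr_len(skb);
 */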
2007 
2008 /**
2009  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2010  * @skb: sk buff
2011  *
2012  * Return:  true/false
2013  */
2014 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2015 {
2016 	return skb_is_nonlinear(skb);
2020 }
2021 
2022 /**
2023  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
2024  * @skb: sk buff
2025  *
2026  * Return: TCP sequence number
2027  */
2028 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2029 {
2030 	return ntohl(tcp_hdr(skb)->seq);
2031 }
2032 
2033 /**
2034  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
2035  * @skb: sk buff
2036  *
2037  * Return: data pointer to typecast into your priv structure
2038  */
2039 static inline uint8_t *
2040 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2041 {
2042 	return &skb->cb[8];
2043 }
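
/*
 * Example (illustrative sketch): "struct my_priv" is a hypothetical
 * per-packet structure; it must fit within the cb[] bytes remaining
 * past the 8-byte offset returned here.
 *
 *	struct my_priv *priv =
 *		(struct my_priv *)__qdf_nbuf_get_priv_ptr(skb);
 */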
2044 
2045 /**
2046  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2047  * @buf: Pointer to nbuf
2048  *
2049  * Return: None
2050  */
2051 static inline void
2052 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2053 {
2054 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2055 }
2056 
2057 /**
2058  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2059  *
2060  * @skb: sk buff
2061  * @queue_id: Queue id
2062  *
2063  * Return: void
2064  */
2065 static inline void
2066 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2067 {
2068 	skb_record_rx_queue(skb, queue_id);
2069 }
2070 
2071 /**
2072  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2073  *
2074  * @skb: sk buff
2075  *
2076  * Return: Queue mapping
2077  */
2078 static inline uint16_t
2079 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2080 {
2081 	return skb->queue_mapping;
2082 }
2083 
2084 /**
2085  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2086  *
2087  * @skb: sk buff
2088  *
2089  * Return: void
2090  */
2091 static inline void
2092 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2093 {
2094 	__net_timestamp(skb);
2095 }
2096 
2097 /**
2098  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2099  *
2100  * @skb: sk buff
2101  *
2102  * Return: timestamp stored in skb in ms
2103  */
2104 static inline uint64_t
2105 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2106 {
2107 	return ktime_to_ms(skb_get_ktime(skb));
2108 }
2109 
2110 /**
2111  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2112  *
2113  * @skb: sk buff
2114  *
2115  * Return: time difference in ms
2116  */
2117 static inline uint64_t
2118 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2119 {
2120 	return ktime_to_ms(net_timedelta(skb->tstamp));
2121 }
2122 
2123 /**
2124  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
2125  *
2126  * @skb: sk buff
2127  *
2128  * Return: time difference in micro seconds
2129  */
2130 static inline uint64_t
2131 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2132 {
2133 	return ktime_to_us(net_timedelta(skb->tstamp));
2134 }
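
/*
 * Example (illustrative sketch): measure how long a packet spent in
 * the driver by stamping it on ingress and reading the delta on
 * completion; "delay_us" is a hypothetical caller variable.
 *
 *	__qdf_nbuf_set_timestamp(skb);			(on ingress)
 *	...
 *	delay_us = __qdf_nbuf_get_timedelta_us(skb);	(on completion)
 */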
2135 
2136 /**
2137  * __qdf_nbuf_orphan() - orphan a nbuf
2138  * @skb: sk buff
2139  *
2140  * If a buffer currently has an owner then we call the
2141  * owner's destructor function
2142  *
2143  * Return: void
2144  */
2145 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2146 {
2147 	skb_orphan(skb);
2148 }
2149 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the queue
 * @skb_queue_head: skb queue head
 *
 * Return: skb dequeued from the queue, or NULL if the queue is empty
 */
2150 static inline struct sk_buff *
2151 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2152 {
2153 	return skb_dequeue(skb_queue_head);
2154 }
2155 
/**
 * __qdf_nbuf_queue_head_qlen() - return the queue length
 * @skb_queue_head: skb queue head
 *
 * Return: queue length
 */
2156 static inline
2157 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2158 {
2159 	return skb_queue_head->qlen;
2160 }
2161 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the queue
 * @skb_queue_head: skb queue head
 * @skb: skb to enqueue
 *
 * Return: none
 */
2162 static inline
2163 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2164 					struct sk_buff *skb)
2165 {
2166 	skb_queue_tail(skb_queue_head, skb);
2167 }
2168 
/**
 * __qdf_nbuf_queue_head_init() - initialize the queue head
 * @skb_queue_head: skb queue head
 *
 * Return: none
 */
2169 static inline
2170 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2171 {
2172 	skb_queue_head_init(skb_queue_head);
2173 }
2174 
/**
 * __qdf_nbuf_queue_head_purge() - free all skbs queued on the queue head
 * @skb_queue_head: skb queue head
 *
 * Return: none
 */
2175 static inline
2176 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2177 {
2178 	skb_queue_purge(skb_queue_head);
2179 }
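
/*
 * Example (illustrative sketch): typical lifetime of an
 * sk_buff_head-backed queue using the wrappers above. Unlike the
 * lockless __qdf_nbuf_queue_*() helpers, these map to the kernel's
 * locked sk_buff_head operations.
 *
 *	__qdf_nbuf_queue_head_t qh;
 *
 *	__qdf_nbuf_queue_head_init(&qh);
 *	__qdf_nbuf_queue_head_enqueue_tail(&qh, skb);
 *	skb = __qdf_nbuf_queue_head_dequeue(&qh);
 *	__qdf_nbuf_queue_head_purge(&qh);
 */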
2180 
2181 #ifdef CONFIG_WIN
2182 #include <i_qdf_nbuf_w.h>
2183 #else
2184 #include <i_qdf_nbuf_m.h>
2185 #endif
2186 #endif /* _I_QDF_NBUF_H */
2187