xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision d281143698c171e8a9883bbcdf2b9849b1f64630)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_nbuf.h
21  * This file provides OS dependent nbuf APIs.
22  */
23 
24 #ifndef _I_QDF_NBUF_H
25 #define _I_QDF_NBUF_H
26 
27 #include <linux/skbuff.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/version.h>
32 #include <asm/cacheflush.h>
33 #include <qdf_types.h>
34 #include <qdf_net_types.h>
35 #include <qdf_status.h>
36 #include <qdf_util.h>
37 #include <qdf_mem.h>
38 #include <linux/tcp.h>
39 #include <qdf_util.h>
40 
41 /*
42  * Use the Linux socket buffer (sk_buff) as the underlying implementation
43  * of the nbuf. Linux uses sk_buff to represent both the packet descriptor
44  * and its data, so sk_buff serves both roles here.
45  */
46 typedef struct sk_buff *__qdf_nbuf_t;
47 
48 /**
49  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
50  *
51  * This is used for skb queue management via linux skb buff head APIs
52  */
53 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
54 
55 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
56 
57 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
58  * max tx fragments added by the driver
59  * The driver will always add one tx fragment (the tx descriptor)
60  */
61 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
62 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
63 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
64 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
65 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
66 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
67 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
68 
69 
70 /* mark the first packet after wow wakeup */
71 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
72 
73 /*
74  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
75  */
76 typedef union {
77 	uint64_t       u64;
78 	qdf_dma_addr_t dma_addr;
79 } qdf_paddr_t;
80 
81 /**
82  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
83  *                    - data passed between layers of the driver.
84  *
85  * Notes:
86  *   1. Hard limited to 48 bytes. Please count your bytes
87  *   2. The size of this structure has to be easily calculable, and
88  *      consistently so: do not use any conditional compile flags
89  *   3. Split into a common part followed by a tx/rx overlay
90  *   4. There is only one extra frag, which represents the HTC/HTT header
91  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
92  *      for the priv_cb_w since it must be at same offset for both
93  *      TX and RX union
94  *   6. "ipa.owned" bit must be first member in both TX and RX unions
95  *      for the priv_cb_m since it must be at same offset for both
96  *      TX and RX union.
97  *
98  * @paddr   : physical address retrieved by dma_map of nbuf->data
99  *
100  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
101  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
102  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
103  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
104  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
105  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for the 5-tuple rcvd
106  *
107  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
108  * @rx.dev.priv_cb_m.flush_ind: flush indication
109  * @rx.dev.priv_cb_m.packet_buf_pool: flag to indicate packet buffer pool
110  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
111  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
112  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
113  * @rx.dev.priv_cb_m.lro_ctx: LRO context
114  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
115  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
116  * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index of the RX buffer
117  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
118  *
119  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
120  * @rx.tcp_proto: L4 protocol is TCP
121  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
122  * @rx.ipv6_proto: L3 protocol is IPV6
123  * @rx.ip_offset: offset to IP header
124  * @rx.tcp_offset: offset to TCP header
125  * @rx.rx_ctx_id: Rx context id
126  * @rx.num_elements_in_list: number of elements in the nbuf list
127  *
128  * @rx.tcp_udp_chksum: L4 payload checksum
129  * @rx.tcp_win: TCP window size
130  *
131  * @rx.flow_id: 32bit flow id
132  *
133  * @rx.flag_chfrag_start: first MSDU in an AMSDU
134  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
135  * @rx.flag_chfrag_end: last MSDU in an AMSDU
136  * @rx.flag_retry: flag to indicate MSDU is retried
137  * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
138  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
139  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
140  * @rx.flag_is_frag: flag to indicate skb has frag list
141  * @rx.rsrvd: reserved
142  *
143  * @rx.trace: combined structure for DP and protocol trace
144  * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
145  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
146  * @rx.trace.dp_trace: flag (Datapath trace)
147  * @rx.trace.packet_track: RX_DATA packet
148  * @rx.trace.rsrvd: reserved
149  *
150  * @rx.vdev_id: vdev_id for RX pkt
151  * @rx.is_raw_frame: RAW frame
152  * @rx.fcs_err: FCS error
153  * @rx.tid_val: tid value
154  * @rx.reserved: reserved
155  * @rx.ftype: mcast2ucast, TSO, SG, MESH
156  *
157  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
158  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
159  *
160  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
161  *                 + (1) CE classification enablement bit
162  *                 + (2) packet type (802.3 or Ethernet type II)
163  *                 + (3) packet offset (usually length of HTC/HTT descr)
164  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
165  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
166  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
167  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
168  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
169  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
170  * @tx.dev.priv_cb_m.reserved: reserved
171  *
172  * @tx.ftype: mcast2ucast, TSO, SG, MESH
173  * @tx.vdev_id: vdev (for protocol trace)
174  * @tx.len: length of the efrag pointed to by tx.vaddr/tx.paddr
175  *
176  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
177  * @tx.flags.bits.num: number of extra frags (0 or 1)
178  * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
179  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
180  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
181  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
182  * @tx.flags.bits.flag_ext_header: extended flags
183  * @tx.flags.bits.reserved: reserved
184  * @tx.trace: combined structure for DP and protocol trace
185  * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
186  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
187  * @tx.trace.is_packet_priv:
188  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
189  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
190  *                          + (MGMT_ACTION)] - 4 bits
191  * @tx.trace.dp_trace: flag (Datapath trace)
192  * @tx.trace.is_bcast: flag (Broadcast packet)
193  * @tx.trace.is_mcast: flag (Multicast packet)
194  * @tx.trace.packet_type: flag (Packet type)
195  * @tx.trace.htt2_frm: flag (high-latency path only)
196  * @tx.trace.print: enable packet logging
197  *
198  * @tx.vaddr: virtual address of the extra tx fragment
199  * @tx.paddr: physical/DMA address of the extra tx fragment
200  */
201 struct qdf_nbuf_cb {
202 	/* common */
203 	qdf_paddr_t paddr; /* of skb->data */
204 	/* valid only in one direction */
205 	union {
206 		/* Note: MAX: 40 bytes */
207 		struct {
208 			union {
209 				struct {
210 					void *ext_cb_ptr;
211 					void *fctx;
212 					uint16_t msdu_len;
213 					uint16_t peer_id;
214 					uint16_t protocol_tag;
215 					uint16_t flow_tag;
216 				} priv_cb_w;
217 				struct {
218 					/* ipa_owned bit is common between rx
219 					 * control block and tx control block.
220 					 * Do not change location of this bit.
221 					 */
222 					uint32_t ipa_owned:1,
223 						 peer_cached_buf_frm:1,
224 						 flush_ind:1,
225 						 packet_buf_pool:1,
226 						 l3_hdr_pad:3,
227 						 reserved:9,
228 						 reserved1:16;
229 					uint32_t tcp_seq_num;
230 					uint32_t tcp_ack_num;
231 					union {
232 						struct {
233 							uint16_t msdu_len;
234 							uint16_t peer_id;
235 						} wifi3;
236 						struct {
237 							uint32_t map_index;
238 						} wifi2;
239 					} dp;
240 					unsigned char *lro_ctx;
241 				} priv_cb_m;
242 			} dev;
243 			uint32_t lro_eligible:1,
244 				tcp_proto:1,
245 				tcp_pure_ack:1,
246 				ipv6_proto:1,
247 				ip_offset:7,
248 				tcp_offset:7,
249 				rx_ctx_id:4,
250 				fcs_err:1,
251 				is_raw_frame:1,
252 				num_elements_in_list:8;
253 			uint32_t tcp_udp_chksum:16,
254 				 tcp_win:16;
255 			uint32_t flow_id;
256 			uint8_t flag_chfrag_start:1,
257 				flag_chfrag_cont:1,
258 				flag_chfrag_end:1,
259 				flag_retry:1,
260 				flag_da_mcbc:1,
261 				flag_da_valid:1,
262 				flag_sa_valid:1,
263 				flag_is_frag:1;
264 			union {
265 				uint8_t packet_state;
266 				uint8_t dp_trace:1,
267 					packet_track:4,
268 					rsrvd:3;
269 			} trace;
270 			uint16_t vdev_id:8,
271 				 tid_val:4,
272 				 ftype:4;
273 		} rx;
274 
275 		/* Note: MAX: 40 bytes */
276 		struct {
277 			union {
278 				struct {
279 					void *ext_cb_ptr;
280 					void *fctx;
281 				} priv_cb_w;
282 				struct {
283 					/* ipa_owned bit is common between rx
284 					 * control block and tx control block.
285 					 * Do not change location of this bit.
286 					 */
287 					struct {
288 						uint32_t owned:1,
289 							priv:31;
290 					} ipa;
291 					uint32_t data_attr;
292 					uint16_t desc_id;
293 					uint16_t mgmt_desc_id;
294 					struct {
295 						uint8_t bi_map:1,
296 							reserved:7;
297 					} dma_option;
298 					uint8_t reserved[3];
299 				} priv_cb_m;
300 			} dev;
301 			uint8_t ftype;
302 			uint8_t vdev_id;
303 			uint16_t len;
304 			union {
305 				struct {
306 					uint8_t flag_efrag:1,
307 						flag_nbuf:1,
308 						num:1,
309 						flag_chfrag_start:1,
310 						flag_chfrag_cont:1,
311 						flag_chfrag_end:1,
312 						flag_ext_header:1,
313 						flag_notify_comp:1;
314 				} bits;
315 				uint8_t u8;
316 			} flags;
317 			struct {
318 				uint8_t packet_state:7,
319 					is_packet_priv:1;
320 				uint8_t packet_track:4,
321 					proto_type:4;
322 				uint8_t dp_trace:1,
323 					is_bcast:1,
324 					is_mcast:1,
325 					packet_type:3,
326 					/* used only for hl*/
327 					htt2_frm:1,
328 					print:1;
329 			} trace;
330 			unsigned char *vaddr;
331 			qdf_paddr_t paddr;
332 		} tx;
333 	} u;
334 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
335 
336 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
337 	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));
338 
339 /**
340  *  access macros to qdf_nbuf_cb
341  *  Note: These macros can be used as L-values as well as R-values.
342  *        When used as R-values, they effectively function as "get" macros
343  *        When used as L-values, they effectively function as "set" macros
344  */
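
/*
 * A minimal usage sketch (illustrative only; "skb" and "map_addr" are
 * assumptions, not driver symbols): the same macro serves as both
 * getter and setter.
 *
 *	QDF_NBUF_CB_PADDR(skb) = map_addr;		// L-value: set
 *	qdf_dma_addr_t pa = QDF_NBUF_CB_PADDR(skb);	// R-value: get
 */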
345 
346 #define QDF_NBUF_CB_PADDR(skb) \
347 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
348 
349 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
350 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
351 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
352 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
353 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
354 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
355 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
356 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
357 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
358 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
359 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
360 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
361 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
362 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
363 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
364 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
365 
366 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
367 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
368 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
369 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
370 
371 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
372 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
373 
374 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
375 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
376 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
377 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
378 
379 #define QDF_NBUF_CB_RX_FTYPE(skb) \
380 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
381 
382 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
383 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
384 
385 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
386 	(((struct qdf_nbuf_cb *) \
387 	((skb)->cb))->u.rx.flag_chfrag_start)
388 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
389 	(((struct qdf_nbuf_cb *) \
390 	((skb)->cb))->u.rx.flag_chfrag_cont)
391 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
392 		(((struct qdf_nbuf_cb *) \
393 		((skb)->cb))->u.rx.flag_chfrag_end)
394 
395 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
396 	(((struct qdf_nbuf_cb *) \
397 	((skb)->cb))->u.rx.flag_da_mcbc)
398 
399 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
400 	(((struct qdf_nbuf_cb *) \
401 	((skb)->cb))->u.rx.flag_da_valid)
402 
403 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
404 	(((struct qdf_nbuf_cb *) \
405 	((skb)->cb))->u.rx.flag_sa_valid)
406 
407 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
408 	(((struct qdf_nbuf_cb *) \
409 	((skb)->cb))->u.rx.flag_retry)
410 
411 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
412 	(((struct qdf_nbuf_cb *) \
413 	((skb)->cb))->u.rx.is_raw_frame)
414 
415 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
416 	(((struct qdf_nbuf_cb *) \
417 	((skb)->cb))->u.rx.tid_val)
418 
419 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
420 	(((struct qdf_nbuf_cb *) \
421 	((skb)->cb))->u.rx.flag_is_frag)
422 
423 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
424 	(((struct qdf_nbuf_cb *) \
425 	((skb)->cb))->u.rx.fcs_err)
426 
427 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
428 	qdf_nbuf_set_state(skb, PACKET_STATE)
429 
430 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
431 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
432 
433 #define QDF_NBUF_CB_TX_FTYPE(skb) \
434 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
435 
436 
437 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
438 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
439 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
440 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
441 
442 /* Tx Flags Accessor Macros*/
443 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
444 	(((struct qdf_nbuf_cb *) \
445 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
446 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
447 	(((struct qdf_nbuf_cb *) \
448 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
449 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
450 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
451 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
452 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
453 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
454 	(((struct qdf_nbuf_cb *) \
455 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
456 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
457 	(((struct qdf_nbuf_cb *) \
458 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
459 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
460 		(((struct qdf_nbuf_cb *) \
461 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
462 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
463 		(((struct qdf_nbuf_cb *) \
464 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
465 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
466 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
467 /* End of Tx Flags Accessor Macros */
468 
469 /* Tx trace accessor macros */
470 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
471 	(((struct qdf_nbuf_cb *) \
472 		((skb)->cb))->u.tx.trace.packet_state)
473 
474 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
475 	(((struct qdf_nbuf_cb *) \
476 		((skb)->cb))->u.tx.trace.is_packet_priv)
477 
478 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
479 	(((struct qdf_nbuf_cb *) \
480 		((skb)->cb))->u.tx.trace.packet_track)
481 
482 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
483 		(((struct qdf_nbuf_cb *) \
484 			((skb)->cb))->u.rx.trace.packet_track)
485 
486 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
487 	(((struct qdf_nbuf_cb *) \
488 		((skb)->cb))->u.tx.trace.proto_type)
489 
490 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
491 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
492 
493 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
494 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
495 
496 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
497 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
498 
499 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
500 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
501 
502 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
503 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
504 
505 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
506 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
507 
508 #define QDF_NBUF_CB_SET_BCAST(skb) \
509 	(((struct qdf_nbuf_cb *) \
510 		((skb)->cb))->u.tx.trace.is_bcast = true)
511 
512 #define QDF_NBUF_CB_SET_MCAST(skb) \
513 	(((struct qdf_nbuf_cb *) \
514 		((skb)->cb))->u.tx.trace.is_mcast = true)
515 /* End of Tx trace accessor macros */
516 
517 
518 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
519 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
520 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
521 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
522 
523 /* assume the OS provides a single fragment */
524 #define __qdf_nbuf_get_num_frags(skb)		   \
525 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
526 
527 #define __qdf_nbuf_reset_num_frags(skb) \
528 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
529 
530 /**
531  *   end of nbuf->cb access macros
532  */
533 
534 typedef void (*qdf_nbuf_trace_update_t)(char *);
535 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
536 
537 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
538 
539 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
540 	(QDF_NBUF_CB_PADDR(skb) = paddr)
541 
542 #define __qdf_nbuf_frag_push_head(					\
543 	skb, frag_len, frag_vaddr, frag_paddr)				\
544 	do {					\
545 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
546 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
547 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
548 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
549 	} while (0)
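
/*
 * Illustrative sketch (assumed variables "desc_vaddr", "desc_paddr" and
 * "desc_len"): a driver typically pushes the HTC/HTT descriptor as the
 * single extra fragment before handing the nbuf to the target.
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now reports 2: descriptor + data
 */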
550 
551 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
552 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
553 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
554 
555 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
556 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
557 
558 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
559 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
560 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
561 	 /* assume that the OS only provides a single fragment */	\
562 	 QDF_NBUF_CB_PADDR(skb))
563 
564 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
565 
566 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
567 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
568 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
569 
570 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
571 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
572 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
573 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
574 
575 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
576 	do {								\
577 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
578 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
579 		if (frag_num)						\
580 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
581 							      is_wstrm; \
582 		else					\
583 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
584 							      is_wstrm; \
585 	} while (0)
586 
587 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
588 	do { \
589 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
590 	} while (0)
591 
592 #define __qdf_nbuf_get_vdev_ctx(skb) \
593 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
594 
595 #define __qdf_nbuf_set_tx_ftype(skb, type) \
596 	do { \
597 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
598 	} while (0)
599 
600 #define __qdf_nbuf_get_tx_ftype(skb) \
601 		 QDF_NBUF_CB_TX_FTYPE((skb))
602 
603 
604 #define __qdf_nbuf_set_rx_ftype(skb, type) \
605 	do { \
606 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
607 	} while (0)
608 
609 #define __qdf_nbuf_get_rx_ftype(skb) \
610 		 QDF_NBUF_CB_RX_FTYPE((skb))
611 
612 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
613 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
614 
615 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
616 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
617 
618 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
619 	do { \
620 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
621 	} while (0)
622 
623 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
624 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
625 
626 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
627 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
628 
629 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
630 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
631 
632 #define __qdf_nbuf_set_da_mcbc(skb, val) \
633 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
634 
635 #define __qdf_nbuf_is_da_mcbc(skb) \
636 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
637 
638 #define __qdf_nbuf_set_da_valid(skb, val) \
639 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
640 
641 #define __qdf_nbuf_is_da_valid(skb) \
642 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
643 
644 #define __qdf_nbuf_set_sa_valid(skb, val) \
645 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
646 
647 #define __qdf_nbuf_is_sa_valid(skb) \
648 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
649 
650 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
651 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
652 
653 #define __qdf_nbuf_is_rx_retry_flag(skb) \
654 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
655 
656 #define __qdf_nbuf_set_raw_frame(skb, val) \
657 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
658 
659 #define __qdf_nbuf_is_raw_frame(skb) \
660 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
661 
662 #define __qdf_nbuf_get_tid_val(skb) \
663 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
664 
665 #define __qdf_nbuf_set_tid_val(skb, val) \
666 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
667 
668 #define __qdf_nbuf_set_is_frag(skb, val) \
669 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
670 
671 #define __qdf_nbuf_is_frag(skb) \
672 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
673 
674 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
675 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
676 
677 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
678 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
679 
680 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
681 	do { \
682 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
683 	} while (0)
684 
685 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
686 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
687 
688 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
689 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
690 
691 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
692 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
693 
694 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
695 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
696 
697 #define __qdf_nbuf_trace_get_proto_type(skb) \
698 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
699 
700 #define __qdf_nbuf_data_attr_get(skb)		\
701 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
702 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
703 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
704 
705 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
706 		skb_queue_walk_safe(queue, var, tvar)
707 
708 /**
709  * __qdf_nbuf_num_frags_init() - init extra frags
710  * @skb: sk buffer
711  *
712  * Return: none
713  */
714 static inline
715 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
716 {
717 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
718 }
719 
720 /*
721  * prototypes. Implemented in qdf_nbuf.c
722  */
723 
724 /**
725  * __qdf_nbuf_alloc() - Allocate nbuf
726  * @osdev: Device handle
727  * @size: Netbuf requested size
728  * @reserve: headroom to start with
729  * @align: Align
730  * @prio: Priority
731  * @func: Function name of the call site
732  * @line: line number of the call site
733  *
734  * This allocates an nbuf, aligns it if needed, and reserves some headroom.
735  * Since the reserve is applied after alignment, an unaligned reserve value
736  * will result in an unaligned data address.
737  *
738  * Return: nbuf or %NULL if no memory
739  */
740 __qdf_nbuf_t
741 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
742 		 int prio, const char *func, uint32_t line);
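
/*
 * Illustrative call (not from the driver): allocate a 2 KB nbuf with
 * 64 bytes of headroom, 4-byte alignment and default priority, passing
 * the call site for leak tracking. "osdev" is an assumed device handle.
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */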
743 
744 void __qdf_nbuf_free(struct sk_buff *skb);
745 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
746 			struct sk_buff *skb, qdf_dma_dir_t dir);
747 void __qdf_nbuf_unmap(__qdf_device_t osdev,
748 			struct sk_buff *skb, qdf_dma_dir_t dir);
749 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
750 				 struct sk_buff *skb, qdf_dma_dir_t dir);
751 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
752 			struct sk_buff *skb, qdf_dma_dir_t dir);
753 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
754 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
755 
756 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
757 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
758 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
759 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
760 	qdf_dma_dir_t dir, int nbytes);
761 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
762 	qdf_dma_dir_t dir, int nbytes);
763 
764 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
765 	qdf_dma_dir_t dir);
766 
767 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
768 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
769 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
770 QDF_STATUS __qdf_nbuf_frag_map(
771 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
772 	int offset, qdf_dma_dir_t dir, int cur_frag);
773 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
774 
775 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
776 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
777 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
778 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
779 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
780 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
781 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
782 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
783 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
784 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
785 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
786 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
787 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
788 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
789 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
790 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
791 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
792 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
793 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
794 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
795 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
796 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
797 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
798 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
799 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
800 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
801 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
802 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
803 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
804 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
805 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
806 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
807 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
808 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
809 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
810 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
811 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
812 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
813 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
814 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
815 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
816 
817 #ifdef QDF_NBUF_GLOBAL_COUNT
818 int __qdf_nbuf_count_get(void);
819 void __qdf_nbuf_count_inc(struct sk_buff *skb);
820 void __qdf_nbuf_count_dec(struct sk_buff *skb);
821 void __qdf_nbuf_mod_init(void);
822 void __qdf_nbuf_mod_exit(void);
823 
824 #else
825 
826 static inline int __qdf_nbuf_count_get(void)
827 {
828 	return 0;
829 }
830 
831 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
832 {
833 	return;
834 }
835 
836 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
837 {
838 	return;
839 }
840 
841 static inline void __qdf_nbuf_mod_init(void)
842 {
843 	return;
844 }
845 
846 static inline void __qdf_nbuf_mod_exit(void)
847 {
848 	return;
849 }
850 #endif
851 
852 /**
853  * __qdf_to_status() - OS to QDF status conversion
854  * @error : OS error
855  *
856  * Return: QDF status
857  */
858 static inline QDF_STATUS __qdf_to_status(signed int error)
859 {
860 	switch (error) {
861 	case 0:
862 		return QDF_STATUS_SUCCESS;
863 	case ENOMEM:
864 	case -ENOMEM:
865 		return QDF_STATUS_E_NOMEM;
866 	default:
867 		return QDF_STATUS_E_NOSUPPORT;
868 	}
869 }
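
/*
 * Worked example (illustrative): both spellings of the errno map to the
 * same QDF code, and anything unrecognized becomes E_NOSUPPORT.
 *
 *	__qdf_to_status(0);		// QDF_STATUS_SUCCESS
 *	__qdf_to_status(-ENOMEM);	// QDF_STATUS_E_NOMEM
 *	__qdf_to_status(EIO);		// QDF_STATUS_E_NOSUPPORT
 */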
870 
871 /**
872  * __qdf_nbuf_len() - return the amount of valid data in the skb
873  * @skb: Pointer to network buffer
874  *
875  * This API returns the amount of valid data in the skb. If there are
876  * extra frags, their length is included in the total.
877  *
878  * Return: network buffer length
879  */
880 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
881 {
882 	int i, extra_frag_len = 0;
883 
884 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
885 	if (i > 0)
886 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
887 
888 	return extra_frag_len + skb->len;
889 }
890 
891 /**
892  * __qdf_nbuf_cat() - link two nbufs
893  * @dst: Buffer to piggyback into
894  * @src: Buffer to put
895  *
896  * Concat two nbufs: the new buf (src) is piggybacked into the older one (dst).
897  * It is the caller's responsibility to free the src skb.
898  *
899  * Return: QDF_STATUS of the call; on failure dst is left unmodified
900  *         and the caller must still free src
901  */
902 static inline QDF_STATUS
903 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
904 {
905 	QDF_STATUS error = 0;
906 
907 	qdf_assert(dst && src);
908 
909 	/*
910 	 * Since pskb_expand_head unconditionally reallocates the skb->head
911 	 * buffer, first check whether the current buffer is already large
912 	 * enough.
913 	 */
914 	if (skb_tailroom(dst) < src->len) {
915 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
916 		if (error)
917 			return __qdf_to_status(error);
918 	}
919 
920 	memcpy(skb_tail_pointer(dst), src->data, src->len);
921 	skb_put(dst, src->len);
922 	return __qdf_to_status(error);
923 }
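
/*
 * Usage sketch (hypothetical "dst" and "src" nbufs): since dst absorbs a
 * copy of src's bytes, the caller still owns and must free src.
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */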
924 
925 /*
926  * nbuf manipulation routines
927  */
928 /**
929  * __qdf_nbuf_headroom() - return the amount of head space available
930  * @skb: Pointer to network buffer
931  *
932  * Return: amount of head room
933  */
934 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
935 {
936 	return skb_headroom(skb);
937 }
938 
939 /**
940  * __qdf_nbuf_tailroom() - return the amount of tail space available
941  * @skb: Pointer to network buffer
942  *
943  * Return: amount of tail room
944  */
945 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
946 {
947 	return skb_tailroom(skb);
948 }
949 
950 /**
951  * __qdf_nbuf_put_tail() - Puts data in the end
952  * @skb: Pointer to network buffer
953  * @size: size to be pushed
954  *
955  * Return: data pointer of this buf where new data has to be
956  *         put, or NULL if expanding the tailroom fails (the skb is freed).
957  */
958 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
959 {
960 	if (skb_tailroom(skb) < size) {
961 		if (unlikely(pskb_expand_head(skb, 0,
962 			size - skb_tailroom(skb), GFP_ATOMIC))) {
963 			dev_kfree_skb_any(skb);
964 			return NULL;
965 		}
966 	}
967 	return skb_put(skb, size);
968 }
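
/*
 * Illustrative sketch: append a 4-byte trailer, bearing in mind that on
 * allocation failure the skb has already been freed. "trailer" is an
 * assumed 4-byte buffer.
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb is gone, do not touch it
 *	memcpy(tail, trailer, 4);
 */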
969 
970 /**
971  * __qdf_nbuf_trim_tail() - trim data out from the end
972  * @skb: Pointer to network buffer
973  * @size: size to be popped
974  *
975  * Return: none
976  */
977 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
978 {
979 	return skb_trim(skb, skb->len - size);
980 }
981 
982 
983 /*
984  * prototypes. Implemented in qdf_nbuf.c
985  */
986 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
987 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
988 				qdf_nbuf_rx_cksum_t *cksum);
989 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
990 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
991 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
992 void __qdf_nbuf_ref(struct sk_buff *skb);
993 int __qdf_nbuf_shared(struct sk_buff *skb);
994 
995 /*
996  * qdf_nbuf_pool_delete() implementation - do nothing in linux
997  */
998 #define __qdf_nbuf_pool_delete(osdev)
999 
1000 /**
1001  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
1002  * @skb: Pointer to network buffer
1003  *
1004  * If GFP_ATOMIC is overkill, we could check whether we are called
1005  * from interrupt context and use GFP_KERNEL in the normal,
1006  * non-atomic case.
1007  *
1008  * example: use "in_irq() || irqs_disabled()"
1009  *
1010  * Return: cloned skb
1011  */
1012 static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
1013 {
1014 	struct sk_buff *skb_new = NULL;
1015 
1016 	skb_new = skb_clone(skb, GFP_ATOMIC);
1017 	if (skb_new)
1018 		__qdf_nbuf_count_inc(skb_new);
1019 
1020 	return skb_new;
1021 }
1022 
1023 /**
1024  * __qdf_nbuf_copy() - returns a private copy of the skb
1025  * @skb: Pointer to network buffer
1026  *
1027  * This API returns a private copy of the skb; the skb returned is
1028  * completely modifiable by callers
1029  *
1030  * Return: skb or NULL
1031  */
1032 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1033 {
1034 	struct sk_buff *skb_new = NULL;
1035 
1036 	skb_new = skb_copy(skb, GFP_ATOMIC);
1037 	if (skb_new)
1038 		__qdf_nbuf_count_inc(skb_new);
1039 
1040 	return skb_new;
1041 }
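
/*
 * Quick contrast (illustrative, "skb" assumed): __qdf_nbuf_clone() shares
 * the data buffer and is cheap but read-only, while __qdf_nbuf_copy()
 * duplicates the data and is safe to modify.
 *
 *	struct sk_buff *ro = __qdf_nbuf_clone(skb);	// shared data
 *	struct sk_buff *rw = __qdf_nbuf_copy(skb);	// private data
 */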
1042 
1043 #define __qdf_nbuf_reserve      skb_reserve
1044 
1045 /**
1046  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1047  * @skb: Pointer to network buffer
1048  * @data: data pointer
1049  *
1050  * Return: none
1051  */
1052 static inline void
1053 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1054 {
1055 	skb->data = data;
1056 }
1057 
1058 /**
1059  * __qdf_nbuf_set_len() - set buffer data length
1060  * @skb: Pointer to network buffer
1061  * @len: data length
1062  *
1063  * Return: none
1064  */
1065 static inline void
1066 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1067 {
1068 	skb->len = len;
1069 }
1070 
1071 /**
1072  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1073  * @skb: Pointer to network buffer
1074  * @len: skb data length
1075  *
1076  * Return: none
1077  */
1078 static inline void
1079 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1080 {
1081 	skb_set_tail_pointer(skb, len);
1082 }
1083 
1084 /**
1085  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1086  * @skb: Pointer to network buffer
1087  * @list: list to use
1088  *
1089  * This is a lockless version, driver must acquire locks if it
1090  * needs to synchronize
1091  *
1092  * Return: none
1093  */
1094 static inline void
1095 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1096 {
1097 	__skb_unlink(skb, list);
1098 }
1099 
1100 /**
1101  * __qdf_nbuf_reset() - reset the buffer data and pointer
1102  * @skb: Network buf instance
1103  * @reserve: reserve
1104  * @align: align
1105  *
1106  * Return: none
1107  */
1108 static inline void
1109 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1110 {
1111 	int offset;
1112 
1113 	skb_push(skb, skb_headroom(skb));
1114 	skb_put(skb, skb_tailroom(skb));
1115 	memset(skb->data, 0x0, skb->len);
1116 	skb_trim(skb, 0);
1117 	skb_reserve(skb, NET_SKB_PAD);
1118 	memset(skb->cb, 0x0, sizeof(skb->cb));
1119 
1120 	/*
1121 	 * The default is for netbuf fragments to be interpreted
1122 	 * as wordstreams rather than bytestreams.
1123 	 */
1124 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1125 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1126 
1127 	/*
1128 	 * Align & make sure that the tail & data are adjusted properly
1129 	 */
1130 
1131 	if (align) {
1132 		offset = ((unsigned long)skb->data) % align;
1133 		if (offset)
1134 			skb_reserve(skb, align - offset);
1135 	}
1136 
1137 	skb_reserve(skb, reserve);
1138 }
1139 
1140 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1141 /**
1142  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
1143  *                                         buffer in kernel
1143  *                                       in kernel
1144  *
1145  * Return: true if dev_scratch is supported
1146  *         false if dev_scratch is not supported
1147  */
1148 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1149 {
1150 	return true;
1151 }
1152 
1153 /**
1154  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1155  * @skb: Pointer to network buffer
1156  *
1157  * Return: dev_scratch if dev_scratch supported
1158  *         0 if dev_scratch not supported
1159  */
1160 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1161 {
1162 	return skb->dev_scratch;
1163 }
1164 
1165 /**
1166  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1167  * @skb: Pointer to network buffer
1168  * @value: value to be set in dev_scratch of network buffer
1169  *
1170  * Return: void
1171  */
1172 static inline void
1173 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1174 {
1175 	skb->dev_scratch = value;
1176 }
1177 #else
1178 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1179 {
1180 	return false;
1181 }
1182 
1183 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1184 {
1185 	return 0;
1186 }
1187 
1188 static inline void
1189 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1190 {
1191 }
1192 #endif /* KERNEL_VERSION(4, 14, 0) */
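
/*
 * Illustrative use of the dev_scratch wrappers, guarding on kernel
 * support ("skb" and "cookie" are assumed caller-defined):
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 */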
1193 
1194 /**
1195  * __qdf_nbuf_head() - return the skb's head pointer
1196  * @skb: Pointer to network buffer
1197  *
1198  * Return: Pointer to head buffer
1199  */
1200 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1201 {
1202 	return skb->head;
1203 }
1204 
1205 /**
1206  * __qdf_nbuf_data() - return the pointer to data header in the skb
1207  * @skb: Pointer to network buffer
1208  *
1209  * Return: Pointer to skb data
1210  */
1211 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1212 {
1213 	return skb->data;
1214 }
1215 
1216 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1217 {
1218 	return (uint8_t *)&skb->data;
1219 }
1220 
1221 /**
1222  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1223  * @skb: Pointer to network buffer
1224  *
1225  * Return: skb protocol
1226  */
1227 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1228 {
1229 	return skb->protocol;
1230 }
1231 
1232 /**
1233  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1234  * @skb: Pointer to network buffer
1235  *
1236  * Return: skb ip_summed
1237  */
1238 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1239 {
1240 	return skb->ip_summed;
1241 }
1242 
1243 /**
1244  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1245  * @skb: Pointer to network buffer
1246  * @ip_summed: ip checksum
1247  *
1248  * Return: none
1249  */
1250 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1251 		 uint8_t ip_summed)
1252 {
1253 	skb->ip_summed = ip_summed;
1254 }
1255 
1256 /**
1257  * __qdf_nbuf_get_priority() - return the priority value of the skb
1258  * @skb: Pointer to network buffer
1259  *
1260  * Return: skb priority
1261  */
1262 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1263 {
1264 	return skb->priority;
1265 }
1266 
1267 /**
1268  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1269  * @skb: Pointer to network buffer
1270  * @p: priority
1271  *
1272  * Return: none
1273  */
1274 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1275 {
1276 	skb->priority = p;
1277 }
1278 
1279 /**
1280  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1281  * @skb: Current skb
1282  * @skb_next: Next skb
1283  *
1284  * Return: void
1285  */
1286 static inline void
1287 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1288 {
1289 	skb->next = skb_next;
1290 }
1291 
1292 /**
1293  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1294  * @skb: Current skb
1295  *
1296  * Return: the next skb pointed to by the current skb
1297  */
1298 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1299 {
1300 	return skb->next;
1301 }
1302 
1303 /**
1304  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1305  * @skb: Current skb
1306  * @skb_next: Next skb
1307  *
1308  * This fn is used to link up extensions to the head skb. Does not handle
1309  * linking to the head
1310  *
1311  * Return: none
1312  */
1313 static inline void
1314 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1315 {
1316 	skb->next = skb_next;
1317 }
1318 
1319 /**
1320  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1321  * @skb: Current skb
1322  *
1323  * Return: the next skb pointed to by the current skb
1324  */
1325 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1326 {
1327 	return skb->next;
1328 }
1329 
1330 /**
1331  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1332  * @skb_head: head_buf nbuf holding head segment (single)
1333  * @ext_list: nbuf list holding linked extensions to the head
1334  * @ext_len: Total length of all buffers in the extension list
1335  *
1336  * This function is used to link up a list of packet extensions (seg1, seg2, ...)
1337  * to the nbuf holding the head segment (seg0)
1338  *
1339  * Return: none
1340  */
1341 static inline void
1342 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1343 			struct sk_buff *ext_list, size_t ext_len)
1344 {
1345 	skb_shinfo(skb_head)->frag_list = ext_list;
1346 	skb_head->data_len = ext_len;
1347 	skb_head->len += skb_head->data_len;
1348 }
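
/*
 * Sketch of stitching a jumbo frame (assumed "head", "ext_list" and a
 * precomputed "total_ext_len" covering all extension buffers):
 *
 *	__qdf_nbuf_append_ext_list(head, ext_list, total_ext_len);
 *	// head->len now covers the head segment plus all extensions
 */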
1349 
1350 /**
1351  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1352  * @head_buf: Network buf holding head segment (single)
1353  *
1354  * This ext_list is populated when we have a jumbo packet, for example in
1355  * case of monitor mode amsdu packet reception, stitched via frag_list.
1356  *
1357  * Return: Network buf list holding linked extensions from head buf.
1358  */
1359 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1360 {
1361 	return (skb_shinfo(head_buf)->frag_list);
1362 }
1363 
1364 /**
1365  * __qdf_nbuf_get_age() - return the checksum value of the skb
1366  * @skb: Pointer to network buffer
1367  *
1368  * Return: checksum value
1369  */
1370 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1371 {
1372 	return skb->csum;
1373 }
1374 
1375 /**
1376  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1377  * @skb: Pointer to network buffer
1378  * @v: Value
1379  *
1380  * Return: none
1381  */
1382 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1383 {
1384 	skb->csum = v;
1385 }
1386 
1387 /**
1388  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1389  * @skb: Pointer to network buffer
1390  * @adj: Adjustment value
1391  *
1392  * Return: none
1393  */
1394 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1395 {
1396 	skb->csum -= adj;
1397 }
1398 
1399 /**
1400  * __qdf_nbuf_copy_bits() - copy data from an skb into a flat buffer
1401  * @skb: Pointer to network buffer
1402  * @offset: Offset value
1403  * @len: Length
1404  * @to: Destination pointer
1405  *
1406  * Return: 0 on success, or a negative error on failure
1407  */
1408 static inline int32_t
1409 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1410 {
1411 	return skb_copy_bits(skb, offset, to, len);
1412 }
1413 
1414 /**
1415  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1416  * @skb: Pointer to network buffer
1417  * @len:  Packet length
1418  *
1419  * Return: none
1420  */
1421 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1422 {
1423 	if (skb->len > len) {
1424 		skb_trim(skb, len);
1425 	} else {
1426 		if (skb_tailroom(skb) < len - skb->len) {
1427 			if (unlikely(pskb_expand_head(skb, 0,
1428 				len - skb->len - skb_tailroom(skb),
1429 				GFP_ATOMIC))) {
1430 				dev_kfree_skb_any(skb);
1431 				qdf_assert(0);
1432 			}
1433 		}
1434 		skb_put(skb, (len - skb->len));
1435 	}
1436 }
1437 
1438 /**
1439  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1440  * @skb: Pointer to network buffer
1441  * @protocol: Protocol type
1442  *
1443  * Return: none
1444  */
1445 static inline void
1446 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1447 {
1448 	skb->protocol = protocol;
1449 }
1450 
1451 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1452 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1453 
1454 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1455 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1456 
1457 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1458 				      uint32_t *lo, uint32_t *hi);
1459 
1460 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1461 	struct qdf_tso_info_t *tso_info);
1462 
1463 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1464 			  struct qdf_tso_seg_elem_t *tso_seg,
1465 			  bool is_last_seg);
1466 
1467 #ifdef FEATURE_TSO
1468 /**
1469  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1470  *                                    payload len
1471  * @skb: buffer
1472  *
1473  * Return: size
1474  */
1475 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1476 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1477 
1478 #else
1479 static inline
1480 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
1481 {
1482 	return 0;
1483 }
1484 
1485 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1486 {
1487 	return 0;
1488 }
1489 
1490 #endif /* FEATURE_TSO */
1491 
1492 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1493 {
1494 	if (skb_is_gso(skb) &&
1495 		(skb_is_gso_v6(skb) ||
1496 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1497 		return true;
1498 	else
1499 		return false;
1500 }
1501 
1502 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1503 
1504 int __qdf_nbuf_get_users(struct sk_buff *skb);
1505 
1506 /**
1507  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1508  *			      and get hw_classify by peeking
1509  *			      into packet
1510  * @nbuf:		Network buffer (skb on Linux)
1511  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1512  * @pkt_subtype:	Bit 4 of this field in the HTT descriptor
1513  *			needs to be set in case of CE classification
1514  *			support; it is set by this macro.
1515  * @hw_classify:	This is a flag which is set to indicate
1516  *			CE classification is enabled.
1517  *			Do not set this bit for VLAN packets
1518  *			OR for mcast / bcast frames.
1519  *
1520  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1521  * whether to enable tx_classify bit in CE.
1522  *
1523  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1524  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1525  * it is the length and a 802.3 frame else it is Ethernet Type II
1526  * (RFC 894).
1527  * Bit 4 in pkt_subtype is the tx_classify bit
1528  *
1529  * Return:	void
1530  */
1531 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1532 				pkt_subtype, hw_classify)	\
1533 do {								\
1534 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1535 	uint16_t ether_type = ntohs(eh->h_proto);		\
1536 	bool is_mc_bc;						\
1537 								\
1538 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1539 		   is_multicast_ether_addr((uint8_t *)eh);	\
1540 								\
1541 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1542 		hw_classify = 1;				\
1543 		pkt_subtype = 0x01 <<				\
1544 			HTT_TX_CLASSIFY_BIT_S;			\
1545 	}							\
1546 								\
1547 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1548 		pkt_type = htt_pkt_type_ethernet;		\
1549 								\
1550 } while (0)
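
/*
 * Illustrative expansion (local variables are assumptions): for a
 * unicast, non-VLAN frame the macro enables CE classification via
 * pkt_subtype; for an 802.3 length-field frame it also overrides
 * pkt_type to htt_pkt_type_ethernet.
 *
 *	uint8_t pkt_type = 0, pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */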
1551 
1552 /**
1553  * nbuf private buffer routines
1554  */
1555 
1556 /**
1557  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1558  * @skb: Pointer to network buffer
1559  * @addr: Pointer to store header's addr
1560  * @len: network buffer length
1561  *
1562  * Return: none
1563  */
1564 static inline void
1565 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1566 {
1567 	*addr = skb->data;
1568 	*len = skb->len;
1569 }
1570 
1571 /**
1572  * typedef __qdf_nbuf_queue_t - network buffer queue
1573  * @head: Head pointer
1574  * @tail: Tail pointer
1575  * @qlen: Queue length
1576  */
1577 typedef struct __qdf_nbuf_qhead {
1578 	struct sk_buff *head;
1579 	struct sk_buff *tail;
1580 	unsigned int qlen;
1581 } __qdf_nbuf_queue_t;
1582 
1583 /****************** Functions *************/
1584 
1585 /**
1586  * __qdf_nbuf_queue_init() - initialize the queue head
1587  * @qhead: Queue head
1588  *
1589  * Return: QDF status
1590  */
1591 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1592 {
1593 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1594 	return QDF_STATUS_SUCCESS;
1595 }
1596 
1597 /**
1598  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1599  * @qhead: Queue head
1600  * @skb: Pointer to network buffer
1601  *
1602  * This is a lockless version, driver must acquire locks if it
1603  * needs to synchronize
1604  *
1605  * Return: none
1606  */
1607 static inline void
1608 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1609 {
1610 	skb->next = NULL;       /*Nullify the next ptr */
1611 
1612 	if (!qhead->head)
1613 		qhead->head = skb;
1614 	else
1615 		qhead->tail->next = skb;
1616 
1617 	qhead->tail = skb;
1618 	qhead->qlen++;
1619 }
1620 
1621 /**
1622  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1623  * @dest: target netbuf queue
1624  * @src:  source netbuf queue
1625  *
1626  * Return: target netbuf queue
1627  */
1628 static inline __qdf_nbuf_queue_t *
1629 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1630 {
1631 	if (!dest)
1632 		return NULL;
1633 	else if (!src || !(src->head))
1634 		return dest;
1635 
1636 	if (!(dest->head))
1637 		dest->head = src->head;
1638 	else
1639 		dest->tail->next = src->head;
1640 
1641 	dest->tail = src->tail;
1642 	dest->qlen += src->qlen;
1643 	return dest;
1644 }
1645 
1646 /**
1647  * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1648  * @qhead: Queue head
1649  * @skb: Pointer to network buffer
1650  *
1651  * This is a lockless version, driver must acquire locks if it needs to
1652  * synchronize
1653  *
1654  * Return: none
1655  */
1656 static inline void
1657 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1658 {
1659 	if (!qhead->head) {
1660 		/*Empty queue Tail pointer Must be updated */
1661 		qhead->tail = skb;
1662 	}
1663 	skb->next = qhead->head;
1664 	qhead->head = skb;
1665 	qhead->qlen++;
1666 }
1667 
1668 /**
1669  * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1670  * @qhead: Queue head
1671  *
1672  * This is a lockless version. Driver should take care of the locks
1673  *
1674  * Return: skb or NULL
1675  */
1676 static inline
1677 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1678 {
1679 	__qdf_nbuf_t tmp = NULL;
1680 
1681 	if (qhead->head) {
1682 		qhead->qlen--;
1683 		tmp = qhead->head;
1684 		if (qhead->head == qhead->tail) {
1685 			qhead->head = NULL;
1686 			qhead->tail = NULL;
1687 		} else {
1688 			qhead->head = tmp->next;
1689 		}
1690 		tmp->next = NULL;
1691 	}
1692 	return tmp;
1693 }
1694 
1695 /**
1696  * __qdf_nbuf_queue_free() - free a queue
1697  * @qhead: head of queue
1698  *
1699  * Return: QDF status
1700  */
1701 static inline QDF_STATUS
1702 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1703 {
1704 	__qdf_nbuf_t  buf = NULL;
1705 
1706 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1707 		__qdf_nbuf_free(buf);
1708 	return QDF_STATUS_SUCCESS;
1709 }
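
/*
 * Typical lifecycle of this lockless queue (caller provides locking;
 * "skb" is an assumed nbuf):
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);		// enqueue at tail
 *	skb = __qdf_nbuf_queue_remove(&q);	// dequeue from head
 *	__qdf_nbuf_queue_free(&q);		// free anything left
 */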
1710 
1711 
1712 /**
1713  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1714  * @qhead: head of queue
1715  *
1716  * Return: NULL if the queue is empty
1717  */
1718 static inline struct sk_buff *
1719 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1720 {
1721 	return qhead->head;
1722 }
1723 
1724 /**
1725  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1726  * @qhead: head of queue
1727  *
1728  * Return: NULL if the queue is empty
1729  */
1730 static inline struct sk_buff *
1731 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1732 {
1733 	return qhead->tail;
1734 }
1735 
1736 /**
1737  * __qdf_nbuf_queue_len() - return the queue length
1738  * @qhead: Queue head
1739  *
1740  * Return: Queue length
1741  */
1742 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1743 {
1744 	return qhead->qlen;
1745 }
1746 
1747 /**
1748  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1749  * @skb: Pointer to network buffer
1750  *
1751  * This API returns the next skb from packet chain, remember the skb is
1752  * still in the queue
1753  *
1754  * Return: NULL if no packets are there
1755  */
1756 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1757 {
1758 	return skb->next;
1759 }
1760 
1761 /**
1762  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1763  * @qhead: Queue head
1764  *
1765  * Return: true if length is 0 else false
1766  */
1767 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1768 {
1769 	return qhead->qlen == 0;
1770 }
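
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): a lockless walk over a queue using
 * __qdf_nbuf_queue_first()/__qdf_nbuf_queue_next(). The buffers remain
 * queued while walking; the caller owns any locking that is needed.
 */
static inline uint32_t
__qdf_nbuf_queue_count_example(__qdf_nbuf_queue_t *qhead)
{
	struct sk_buff *skb;
	uint32_t count = 0;

	for (skb = __qdf_nbuf_queue_first(qhead); skb;
	     skb = __qdf_nbuf_queue_next(skb))
		count++;

	/* for a well-formed queue this matches __qdf_nbuf_queue_len(qhead) */
	return count;
}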
1771 
1772 /*
1773  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1774  * Because the queue head will most likely put in some structure,
1775  * we don't use pointer type as the definition.
1776  */
1777 
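/**
 * __qdf_nbuf_set_send_complete_flag() - set send complete flag (no-op on linux)
 * @skb: sk buff
 * @flag: flag value
 *
 * Return: none
 */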
1784 static inline void
1785 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1786 {
1787 }
1788 
1789 /**
1790  * __qdf_nbuf_realloc_headroom() - expand the headroom, keeping the
1791  *        skb shell intact; the data region is reallocated as needed.
1792  *        In case of failure the skb is released.
1793  * @skb: sk buff
1794  * @headroom: size of headroom
1795  *
1796  * Return: skb or NULL
1797  */
1798 static inline struct sk_buff *
1799 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1800 {
1801 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1802 		dev_kfree_skb_any(skb);
1803 		skb = NULL;
1804 	}
1805 	return skb;
1806 }
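
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): on failure __qdf_nbuf_realloc_headroom() has already freed
 * the skb, so the caller must replace its own pointer with the return value
 * and never touch the old one.
 */
static inline struct sk_buff *
__qdf_nbuf_push_hdr_example(struct sk_buff *skb, uint32_t hdr_len)
{
	if (skb_headroom(skb) < hdr_len) {
		skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
		if (!skb)
			return NULL;	/* original skb already freed */
	}

	skb_push(skb, hdr_len);
	return skb;
}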
1807 
1808 /**
1809  * __qdf_nbuf_realloc_tailroom() - expand the tailroom, keeping the
1810  *        skb shell intact; the data region is reallocated as needed.
1811  *        In case of failure the skb is released.
1812  * @skb: sk buff
1813  * @tailroom: size of tailroom
1814  *
1815  * Return: skb or NULL
1816  */
1817 static inline struct sk_buff *
1818 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1819 {
1820 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1821 		return skb;
1822 	/* unlikely path: expansion failed, free the skb */
1825 	dev_kfree_skb_any(skb);
1826 	return NULL;
1827 }
1828 
1829 /**
1830  * __qdf_nbuf_linearize() - skb linearize
1831  * @skb: sk buff
1832  *
1833  * If the nbuf is non-linear then this function linearizes it, pulling
1834  * all fragment data into the skb's linear data area.
1835  *
1836  * Return: 0 on success, -ENOMEM on failure
1840  */
1841 static inline int
1842 __qdf_nbuf_linearize(struct sk_buff *skb)
1843 {
1844 	return skb_linearize(skb);
1845 }
1846 
1847 /**
1848  * __qdf_nbuf_unshare() - skb unshare
1849  * @skb: sk buff
1850  *
1851  * Create a version of the specified nbuf whose contents
1852  * can be safely modified without affecting other
1853  * users. If the nbuf is a clone then this function
1854  * creates a new copy of the data. If the buffer is not
1855  * a clone the original buffer is returned.
1856  *
1857  * Return: skb or NULL
1858  */
1859 static inline struct sk_buff *
1860 __qdf_nbuf_unshare(struct sk_buff *skb)
1861 {
1862 	return skb_unshare(skb, GFP_ATOMIC);
1863 }
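
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): the classic copy-before-write pattern. The reference
 * passed in is consumed by skb_unshare() in the clone case, so the result
 * must replace the caller's pointer; a NULL return means the allocation
 * failed and the original skb was freed.
 */
static inline struct sk_buff *
__qdf_nbuf_make_writable_example(struct sk_buff *skb)
{
	if (!__qdf_nbuf_is_cloned(skb))
		return skb;	/* sole owner, safe to modify in place */

	return __qdf_nbuf_unshare(skb);
}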
1864 
1865 /**
1866  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1867  * @skb: sk buff
1868  *
1869  * Return: true/false
1870  */
1871 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1872 {
1873 	return skb_cloned(skb);
1874 }
1875 
1876 /**
1877  * __qdf_nbuf_pool_init() - init pool
1878  * @net: net handle
1879  *
1880  * Return: QDF status
1881  */
1882 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1883 {
1884 	return QDF_STATUS_SUCCESS;
1885 }
1886 
1887 /*
1888  * __qdf_nbuf_pool_delete() implementation - do nothing in linux
1889  */
1890 #define __qdf_nbuf_pool_delete(osdev)
1891 
1892 /**
1893  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1894  *        the skb is released.
1895  * @skb: sk buff
1896  * @headroom: size of headroom
1897  * @tailroom: size of tailroom
1898  *
1899  * Return: skb or NULL
1900  */
1901 static inline struct sk_buff *
1902 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1903 {
1904 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1905 		return skb;
1906 
1907 	dev_kfree_skb_any(skb);
1908 	return NULL;
1909 }
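
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): pskb_expand_head() reallocates and copies, so it is worth
 * checking the existing room first and expanding only when short.
 */
static inline struct sk_buff *
__qdf_nbuf_ensure_room_example(struct sk_buff *skb, uint32_t headroom,
			       uint32_t tailroom)
{
	if (skb_headroom(skb) >= headroom && skb_tailroom(skb) >= tailroom)
		return skb;

	/* on failure the skb has already been freed */
	return __qdf_nbuf_expand(skb, headroom, tailroom);
}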
1910 
1911 /**
1912  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1913  * @buf: Network buf instance
1914  * @headroom: Additional headroom to be added
1915  * @tailroom: Additional tailroom to be added
1916  *
1917  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1918  *	or NULL if there is no memory
1919  */
1920 static inline struct sk_buff *
1921 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1922 {
1923 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1924 }
1925 
1926 /**
1927  * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
1928  * @buf: Network buf instance
1929  *
1930  * Return: void
1931  */
1932 static inline void
1933 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
1934 {
1935 	struct sk_buff *list;
1936 
1937 	skb_walk_frags(buf, list)
1938 		skb_get(list);
1939 }
1940 
1941 /**
1942  * __qdf_nbuf_tx_cksum_info() - tx checksum info (not implemented for linux)
1943  * @skb: sk buff
1944  * @hdr_off: header offset
1945  * @where: checksum location
1946  *
1947  * Return: always false, after asserting
1945  */
1946 static inline bool
1947 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1948 			 uint8_t **where)
1949 {
1950 	qdf_assert(0);
1951 	return false;
1952 }
1953 
1954 /**
1955  * __qdf_nbuf_reset_ctxt() - mem zero control block
1956  * @nbuf: buffer
1957  *
1958  * Return: none
1959  */
1960 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1961 {
1962 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1963 }
1964 
1965 /**
1966  * __qdf_nbuf_network_header() - get network header
1967  * @buf: buffer
1968  *
1969  * Return: network header pointer
1970  */
1971 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1972 {
1973 	return skb_network_header(buf);
1974 }
1975 
1976 /**
1977  * __qdf_nbuf_transport_header() - get transport header
1978  * @buf: buffer
1979  *
1980  * Return: transport header pointer
1981  */
1982 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1983 {
1984 	return skb_transport_header(buf);
1985 }
1986 
1987 /**
1988  * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
1989  *  passed as part of the network buffer by the network stack
1990  * @skb: sk buff
1991  *
1992  * Return: TCP MSS size
1993  *
1994  */
1995 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1996 {
1997 	return skb_shinfo(skb)->gso_size;
1998 }
1999 
2000 /**
2001  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2002  * @nbuf: sk buff
2003  *
2004  * Return: none
2005  */
2006 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2007 
2008 /**
2009  * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2010  * @nbuf: sk buff
2011  *
2012  * Return: void ptr
2013  */
2014 static inline void *
2015 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2016 {
2017 	return (void *)nbuf->cb;
2018 }
2019 
2020 /**
2021  * __qdf_nbuf_headlen() - return the length of the skb's linear buffer
2022  * @skb: sk buff
2023  *
2024  * Return: head size
2025  */
2026 static inline size_t
2027 __qdf_nbuf_headlen(struct sk_buff *skb)
2028 {
2029 	return skb_headlen(skb);
2030 }
2031 
2032 /**
2033  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
2034  * @skb: sk buff
2035  *
2036  * Return: number of fragments
2037  */
2038 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
2039 {
2040 	return skb_shinfo(skb)->nr_frags;
2041 }
2042 
2043 /**
2044  * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4
2045  * @skb: sk buff
2046  *
2047  * Return: true/false
2048  */
2049 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2050 {
2051 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
2052 }
2053 
2054 /**
2055  * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6
2056  * @skb: sk buff
2057  *
2058  * Return: true/false
2059  */
2060 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2061 {
2062 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
2063 }
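
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): reading the TSO attributes the stack attached to an skb.
 * gso_size is only meaningful when the packet is actually a TSO packet.
 */
static inline size_t
__qdf_nbuf_tso_mss_example(struct sk_buff *skb)
{
	if (!__qdf_nbuf_tso_tcp_v4(skb) && !__qdf_nbuf_tso_tcp_v6(skb))
		return 0;

	/* MSS the hardware should cut the TCP payload into */
	return __qdf_nbuf_tcp_tso_size(skb);
}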
2064 
2065 /**
2066  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2067  * @skb: sk buff
2068  *
2069  * Return: size of l2+l3+l4 header length
2070  */
2071 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2072 {
2073 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2074 }
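
/*
 * Illustrative sketch (not part of the QDF API; the helper name is
 * hypothetical): skb_transport_offset() already spans L2 + L3, so adding
 * tcp_hdrlen() yields the full L2+L3+L4 header length, e.g. 14 (ethernet)
 * + 20 (IPv4, no options) + 20 (TCP, no options) = 54 bytes.
 */
static inline bool
__qdf_nbuf_hdrs_in_linear_example(struct sk_buff *skb)
{
	/* true when all the packet headers sit in the linear data area */
	return __qdf_nbuf_headlen(skb) >= __qdf_nbuf_l2l3l4_hdr_len(skb);
}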
2075 
2076 /**
2077  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2078  * @skb: sk buff
2079  *
2080  * Return:  true/false
2081  */
2082 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2083 {
2084 	return skb_is_nonlinear(skb);
2088 }
2089 
2090 /**
2091  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
2092  * @skb: sk buff
2093  *
2094  * Return: TCP sequence number
2095  */
2096 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2097 {
2098 	return ntohl(tcp_hdr(skb)->seq);
2099 }
2100 
2101 /**
2102  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
2103  * @skb: sk buff
2104  *
2105  * Return: data pointer to typecast into your priv structure
2106  */
2107 static inline uint8_t *
2108 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2109 {
2110 	return &skb->cb[8];
2111 }
2112 
2113 /**
2114  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2115  * @buf: Pointer to nbuf
2116  *
2117  * Return: None
2118  */
2119 static inline void
2120 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2121 {
2122 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2123 }
2124 
2125 /**
2126  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2127  *
2128  * @skb: sk buff
2129  * @queue_id: Queue id
2130  *
2131  * Return: void
2132  */
2133 static inline void
2134 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2135 {
2136 	skb_record_rx_queue(skb, queue_id);
2137 }
2138 
2139 /**
2140  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2141  *
2142  * @skb: sk buff
2143  *
2144  * Return: Queue mapping
2145  */
2146 static inline uint16_t
2147 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2148 {
2149 	return skb->queue_mapping;
2150 }
2151 
2152 /**
2153  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2154  *
2155  * @skb: sk buff
2156  *
2157  * Return: void
2158  */
2159 static inline void
2160 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2161 {
2162 	__net_timestamp(skb);
2163 }
2164 
2165 /**
2166  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2167  *
2168  * @skb: sk buff
2169  *
2170  * Return: timestamp stored in skb in ms
2171  */
2172 static inline uint64_t
2173 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2174 {
2175 	return ktime_to_ms(skb_get_ktime(skb));
2176 }
2177 
2178 /**
2179  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2180  *
2181  * @skb: sk buff
2182  *
2183  * Return: time difference in ms
2184  */
2185 static inline uint64_t
2186 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2187 {
2188 	return ktime_to_ms(net_timedelta(skb->tstamp));
2189 }
2190 
2191 /**
2192  * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
2193  *
2194  * @skb: sk buff
2195  *
2196  * Return: time difference in microseconds
2197  */
2198 static inline uint64_t
2199 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2200 {
2201 	return ktime_to_us(net_timedelta(skb->tstamp));
2202 }
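
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name and
 * the staleness threshold are hypothetical): stamp an skb on ingress with
 * __qdf_nbuf_set_timestamp(), then use the delta helpers to see how long
 * it has been held.
 */
static inline bool
__qdf_nbuf_is_stale_example(struct sk_buff *skb, uint64_t limit_ms)
{
	/* delta between now and the tstamp stored in the skb */
	return __qdf_nbuf_get_timedelta_ms(skb) > limit_ms;
}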
2203 
2204 /**
2205  * __qdf_nbuf_orphan() - orphan a nbuf
2206  * @skb: sk buff
2207  *
2208  * If a buffer currently has an owner then we call the
2209  * owner's destructor function
2210  *
2211  * Return: void
2212  */
2213 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2214 {
2215 	skb_orphan(skb);
2216 }
2217 
2218 /**
2219  * __qdf_nbuf_map_nbytes_single() - map nbytes
2220  * @osdev: os device
2221  * @buf: buffer
2222  * @dir: direction
2223  * @nbytes: number of bytes
2224  *
2225  * Return: QDF_STATUS
2226  */
2227 #ifdef A_SIMOS_DEVHOST
2228 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2229 		qdf_device_t osdev, struct sk_buff *buf,
2230 		qdf_dma_dir_t dir, int nbytes)
2231 {
2232 	qdf_dma_addr_t paddr;
2233 
2234 	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
2235 	return QDF_STATUS_SUCCESS;
2236 }
2237 #else
2238 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2239 		qdf_device_t osdev, struct sk_buff *buf,
2240 		qdf_dma_dir_t dir, int nbytes)
2241 {
2242 	qdf_dma_addr_t paddr;
2243 
2244 	/* assume that the OS only provides a single fragment */
2245 	QDF_NBUF_CB_PADDR(buf) = paddr =
2246 		dma_map_single(osdev->dev, buf->data,
2247 			       nbytes, __qdf_dma_dir_to_os(dir));
2248 	return dma_mapping_error(osdev->dev, paddr) ?
2249 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2250 }
2251 #endif
2252 /**
2253  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2254  * @osdev: os device
2255  * @buf: buffer
2256  * @dir: direction
2257  * @nbytes: number of bytes
2258  *
2259  * Return: none
2260  */
2261 #if defined(A_SIMOS_DEVHOST)
2262 static inline void
2263 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2264 			       qdf_dma_dir_t dir, int nbytes)
2265 {
2266 }
2267 
2268 #else
2269 static inline void
2270 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2271 			       qdf_dma_dir_t dir, int nbytes)
2272 {
2273 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2274 
2275 	if (qdf_likely(paddr))
2276 		dma_unmap_single(osdev->dev, paddr, nbytes,
2277 				 __qdf_dma_dir_to_os(dir));
2278 }
2281 #endif
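
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): map and unmap must be paired with the same direction and
 * length; the mapped address is published via QDF_NBUF_CB_PADDR(buf).
 */
static inline QDF_STATUS
__qdf_nbuf_dma_roundtrip_example(qdf_device_t osdev, struct sk_buff *buf,
				 qdf_dma_dir_t dir, int nbytes)
{
	QDF_STATUS status;

	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... hand QDF_NBUF_CB_PADDR(buf) to the hardware here ... */

	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
	return QDF_STATUS_SUCCESS;
}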
2282 
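/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list from which the skb is dequeued
 *
 * Return: skb, or NULL if the list is empty
 */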
2283 static inline struct sk_buff *
2284 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2285 {
2286 	return skb_dequeue(skb_queue_head);
2287 }
2288 
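/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of buffers in the list
 */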
2289 static inline
2290 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2291 {
2292 	return skb_queue_head->qlen;
2293 }
2294 
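/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the list
 * @skb_queue_head: skb list into which the skb is enqueued
 * @skb: Pointer to network buffer
 *
 * Return: none
 */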
2295 static inline
2296 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2297 					struct sk_buff *skb)
2298 {
2299 	skb_queue_tail(skb_queue_head, skb);
2300 }
2301 
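/**
 * __qdf_nbuf_queue_head_init() - initialize an skb list
 * @skb_queue_head: skb list to be initialized
 *
 * Return: none
 */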
2302 static inline
2303 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2304 {
2305 	skb_queue_head_init(skb_queue_head);
2306 }
2307 
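/**
 * __qdf_nbuf_queue_head_purge() - free all the buffers in the skb list
 * @skb_queue_head: skb list to be purged
 *
 * Return: none
 */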
2308 static inline
2309 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2310 {
2311 	skb_queue_purge(skb_queue_head);
2312 }
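
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): the __qdf_nbuf_queue_head_*() wrappers follow the usual
 * sk_buff_head lifecycle - init once, enqueue/dequeue in FIFO order, and
 * purge whatever is left when tearing down.
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_fifo_example(struct sk_buff_head *q, struct sk_buff *skb)
{
	__qdf_nbuf_queue_head_init(q);
	__qdf_nbuf_queue_head_enqueue_tail(q, skb);

	/* FIFO: the buffer enqueued first is dequeued first */
	return __qdf_nbuf_queue_head_dequeue(q);
}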
2313 
2314 /**
2315  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
2316  * @skb_queue_head: skb list for which the lock is to be acquired
2317  *
2318  * Return: void
2319  */
2320 static inline
2321 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2322 {
2323 	spin_lock_bh(&skb_queue_head->lock);
2324 }
2325 
2326 /**
2327  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
2328  * @skb_queue_head: skb list for which the lock is to be released
2329  *
2330  * Return: void
2331  */
2332 static inline
2333 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2334 {
2335 	spin_unlock_bh(&skb_queue_head->lock);
2336 }
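
/*
 * Illustrative usage sketch (not part of the QDF API; the helper name is
 * hypothetical): these wrappers take the sk_buff_head's own spinlock.
 * Note that __qdf_nbuf_queue_head_dequeue()/_enqueue_tail() lock the list
 * internally, so they must not be called while this lock is held; use the
 * lock to make a multi-step, non-locking sequence atomic.
 */
static inline uint32_t
__qdf_nbuf_queue_head_qlen_locked_example(struct sk_buff_head *q)
{
	uint32_t qlen;

	__qdf_nbuf_queue_head_lock(q);
	/* reads done under the list lock are consistent as a set */
	qlen = __qdf_nbuf_queue_head_qlen(q);
	__qdf_nbuf_queue_head_unlock(q);

	return qlen;
}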
2337 
2338 #ifdef CONFIG_NBUF_AP_PLATFORM
2339 #include <i_qdf_nbuf_w.h>
2340 #else
2341 #include <i_qdf_nbuf_m.h>
2342 #endif
2343 #endif /* _I_QDF_NBUF_H */
2344