1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_nbuf.h
21  * This file provides OS dependent nbuf API's.
22  */
23 
24 #ifndef _I_QDF_NBUF_H
25 #define _I_QDF_NBUF_H
26 
27 #include <linux/skbuff.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/version.h>
32 #include <asm/cacheflush.h>
33 #include <qdf_types.h>
34 #include <qdf_net_types.h>
35 #include <qdf_status.h>
36 #include <qdf_util.h>
37 #include <qdf_mem.h>
38 #include <linux/tcp.h>
39 #include <qdf_util.h>
40 
41 /*
42  * Use the Linux socket buffer (sk_buff) as the underlying nbuf
43  * implementation. Linux uses sk_buff to represent both the packet
44  * descriptor and its data, so an nbuf maps directly onto an sk_buff.
45  */
46 typedef struct sk_buff *__qdf_nbuf_t;
47 
48 /**
49  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
50  *
51  * This is used for skb queue management via linux skb buff head APIs
52  */
53 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
54 
55 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
56 
57 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
58  * max tx fragments added by the driver
59  * The driver will always add one tx fragment (the tx descriptor)
60  */
61 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
62 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
63 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
64 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
65 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
66 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
67 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
68 
69 
70 /* mark the first packet after wow wakeup */
71 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
72 
73 /*
74  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
75  */
76 typedef union {
77 	uint64_t       u64;
78 	qdf_dma_addr_t dma_addr;
79 } qdf_paddr_t;
80 
81 /**
82  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
83  *                    - data passed between layers of the driver.
84  *
85  * Notes:
86  *   1. Hard limited to 48 bytes. Please count your bytes
87  *   2. The size of this structure must be easily calculable and
88  *      consistent; do not use any conditional compile flags
89  *   3. Split into a common part followed by a tx/rx overlay
90  *   4. There is only one extra frag, which represents the HTC/HTT header
91  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
92  *      for the priv_cb_w since it must be at same offset for both
93  *      TX and RX union
94  *   6. "ipa.owned" bit must be first member in both TX and RX unions
95  *      for the priv_cb_m since it must be at same offset for both
96  *      TX and RX union.
97  *
98  * @paddr   : physical address retrieved by dma_map of nbuf->data
99  *
100  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
101  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
102  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
103  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
104  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
105  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
106  *
107  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
108  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
109  * @rx.dev.priv_cb_m.lro_ctx: LRO context
110  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
111  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
112  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
113  * @rx.dev.priv_cb_m.vdev_id: vdev_id for RX pkt
114  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
115  *
116  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
117  * @rx.peer_cached_buf_frm: peer cached buffer
118  * @rx.tcp_proto: L4 protocol is TCP
119  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
120  * @rx.ipv6_proto: L3 protocol is IPV6
121  * @rx.ip_offset: offset to IP header
122  * @rx.tcp_offset: offset to TCP header
123  * @rx_ctx_id: Rx context id
124  * @flush_ind: flush indication
125  * @num_elements_in_list: number of elements in the nbuf list
126  *
127  * @rx.tcp_udp_chksum: L4 payload checksum
128  * @rx.tcp_win: TCP window size
129  *
130  * @rx.flow_id: 32bit flow id
131  *
132  * @rx.flag_chfrag_start: first MSDU in an AMSDU
133  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
134  * @rx.flag_chfrag_end: last MSDU in an AMSDU
135  * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring
136  * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
137  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
138  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
139  * @rx.flag_is_frag: flag to indicate skb has frag list
140  * @rx.rsrvd: reserved
141  *
142  * @rx.trace: combined structure for DP and protocol trace
143  * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
144  *                        (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
145  * @rx.trace.dp_trace: flag (Datapath trace)
146  * @rx.trace.packet_track: RX_DATA packet
147  * @rx.trace.rsrvd: reserved
148  *
149  * @rx.ftype: mcast2ucast, TSO, SG, MESH
150  * @rx.is_raw_frame: RAW frame
151  * @rx.fcs_err: FCS error
152  * @rx.tid_val: tid value
153  * @rx.flag_retry: flag to indicate MSDU is retried
154  * @rx.reserved: reserved
155  *
156  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
157  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
158  *
159  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
160  *                 (1) CE classification enablement bit
161  *                 (2) packet type (802.3 or Ethernet type II)
162  *                 (3) packet offset (usually length of HTC/HTT descr)
163  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
164  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
165  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
166  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
167  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
168  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
169  * @tx.dev.priv_cb_m.reserved: reserved
170  *
171  * @tx.ftype: mcast2ucast, TSO, SG, MESH
172  * @tx.vdev_id: vdev (for protocol trace)
173  * @tx.len: length of the efrag pointed to by tx.vaddr/tx.paddr
174  *
175  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
176  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
177  * @tx.flags.bits.num: number of extra frags (0 or 1)
178  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
179  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
180  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
181  * @tx.flags.bits.flag_ext_header: extended flags
182  * @tx.flags.bits.flag_notify_comp: flag to indicate tx completion notification
183  * @tx.trace: combined structure for DP and protocol trace
184  * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
185  *                        (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
186  * @tx.trace.is_packet_priv:
187  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
188  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
189  *                          (MGMT_ACTION)] - 4 bits
190  * @tx.trace.dp_trace: flag (Datapath trace)
191  * @tx.trace.is_bcast: flag (Broadcast packet)
192  * @tx.trace.is_mcast: flag (Multicast packet)
193  * @tx.trace.packet_type: flag (Packet type)
194  * @tx.trace.htt2_frm: flag (high-latency path only)
195  * @tx.trace.print: enable packet logging
196  *
197  * @tx.vaddr: virtual address of the extra tx fragment
198  * @tx.paddr: physical/DMA address of the extra tx fragment
199  */
200 struct qdf_nbuf_cb {
201 	/* common */
202 	qdf_paddr_t paddr; /* of skb->data */
203 	/* valid only in one direction */
204 	union {
205 		/* Note: MAX: 40 bytes */
206 		struct {
207 			union {
208 				struct {
209 					void *ext_cb_ptr;
210 					void *fctx;
211 					uint16_t msdu_len;
212 					uint16_t peer_id;
213 					uint16_t protocol_tag;
214 					uint16_t flow_tag;
215 				} priv_cb_w;
216 				struct {
217 					/* ipa_owned bit is common between rx
218 					 * control block and tx control block.
219 					 * Do not change location of this bit.
220 					 */
221 					uint32_t ipa_owned:1,
222 						 reserved:15,
223 						 vdev_id:8,
224 						 reserved1:8;
225 					uint32_t tcp_seq_num;
226 					uint32_t tcp_ack_num;
227 					union {
228 						struct {
229 							uint16_t msdu_len;
230 							uint16_t peer_id;
231 						} wifi3;
232 						struct {
233 							uint32_t map_index;
234 						} wifi2;
235 					} dp;
236 					unsigned char *lro_ctx;
237 				} priv_cb_m;
238 			} dev;
239 			uint32_t lro_eligible:1,
240 				peer_cached_buf_frm:1,
241 				tcp_proto:1,
242 				tcp_pure_ack:1,
243 				ipv6_proto:1,
244 				ip_offset:7,
245 				tcp_offset:7,
246 				rx_ctx_id:4,
247 				flush_ind:1,
248 				num_elements_in_list:8;
249 			uint32_t tcp_udp_chksum:16,
250 				 tcp_win:16;
251 			uint32_t flow_id;
252 			uint8_t flag_chfrag_start:1,
253 				flag_chfrag_cont:1,
254 				flag_chfrag_end:1,
255 				packet_buff_pool:1,
256 				flag_da_mcbc:1,
257 				flag_da_valid:1,
258 				flag_sa_valid:1,
259 				flag_is_frag:1;
260 			union {
261 				uint8_t packet_state;
262 				uint8_t dp_trace:1,
263 					packet_track:4,
264 					rsrvd:3;
265 			} trace;
266 			uint8_t ftype;
267 			uint8_t is_raw_frame:1,
268 				fcs_err:1,
269 				tid_val:4,
270 				flag_retry:1,
271 				reserved:1;
272 		} rx;
273 
274 		/* Note: MAX: 40 bytes */
275 		struct {
276 			union {
277 				struct {
278 					void *ext_cb_ptr;
279 					void *fctx;
280 				} priv_cb_w;
281 				struct {
282 					/* ipa_owned bit is common between rx
283 					 * control block and tx control block.
284 					 * Do not change location of this bit.
285 					 */
286 					struct {
287 						uint32_t owned:1,
288 							priv:31;
289 					} ipa;
290 					uint32_t data_attr;
291 					uint16_t desc_id;
292 					uint16_t mgmt_desc_id;
293 					struct {
294 						uint8_t bi_map:1,
295 							reserved:7;
296 					} dma_option;
297 					uint8_t reserved[3];
298 				} priv_cb_m;
299 			} dev;
300 			uint8_t ftype;
301 			uint8_t vdev_id;
302 			uint16_t len;
303 			union {
304 				struct {
305 					uint8_t flag_efrag:1,
306 						flag_nbuf:1,
307 						num:1,
308 						flag_chfrag_start:1,
309 						flag_chfrag_cont:1,
310 						flag_chfrag_end:1,
311 						flag_ext_header:1,
312 						flag_notify_comp:1;
313 				} bits;
314 				uint8_t u8;
315 			} flags;
316 			struct {
317 				uint8_t packet_state:7,
318 					is_packet_priv:1;
319 				uint8_t packet_track:4,
320 					proto_type:4;
321 				uint8_t dp_trace:1,
322 					is_bcast:1,
323 					is_mcast:1,
324 					packet_type:3,
325 					/* used only for hl*/
326 					htt2_frm:1,
327 					print:1;
328 			} trace;
329 			unsigned char *vaddr;
330 			qdf_paddr_t paddr;
331 		} tx;
332 	} u;
333 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
334 
335 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
336 	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));
337 
338 /**
339  *  access macros to qdf_nbuf_cb
340  *  Note: These macros can be used as L-values as well as R-values.
341  *        When used as R-values, they effectively function as "get" macros
342  *        When used as L-values, they effectively function as "set" macros
343  */
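/*
 * Illustrative usage sketch (not part of the API surface): because each
 * accessor below expands to an L-value, the same macro serves for both
 * get and set:
 *
 *	QDF_NBUF_CB_PADDR(skb) = dma_addr;              // "set"
 *	qdf_dma_addr_t pa = QDF_NBUF_CB_PADDR(skb);     // "get"
 */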
344 
345 #define QDF_NBUF_CB_PADDR(skb) \
346 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
347 
348 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
349 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
350 #define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
351 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
352 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
353 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
354 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
355 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
356 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
357 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
358 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
359 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
360 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
361 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
362 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
363 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
364 #define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
365 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flush_ind)
366 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
367 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
368 
369 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
370 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
371 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
372 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
373 
374 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
375 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
376 
377 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
378 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
379 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
380 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
381 
382 #define QDF_NBUF_CB_RX_FTYPE(skb) \
383 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
384 
385 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
386 	(((struct qdf_nbuf_cb *) \
387 	((skb)->cb))->u.rx.flag_chfrag_start)
388 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
389 	(((struct qdf_nbuf_cb *) \
390 	((skb)->cb))->u.rx.flag_chfrag_cont)
391 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
392 		(((struct qdf_nbuf_cb *) \
393 		((skb)->cb))->u.rx.flag_chfrag_end)
394 #define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
395 		(((struct qdf_nbuf_cb *) \
396 		((skb)->cb))->u.rx.packet_buff_pool)
397 
398 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
399 	(((struct qdf_nbuf_cb *) \
400 	((skb)->cb))->u.rx.flag_da_mcbc)
401 
402 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
403 	(((struct qdf_nbuf_cb *) \
404 	((skb)->cb))->u.rx.flag_da_valid)
405 
406 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
407 	(((struct qdf_nbuf_cb *) \
408 	((skb)->cb))->u.rx.flag_sa_valid)
409 
410 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
411 	(((struct qdf_nbuf_cb *) \
412 	((skb)->cb))->u.rx.flag_retry)
413 
414 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
415 	(((struct qdf_nbuf_cb *) \
416 	((skb)->cb))->u.rx.is_raw_frame)
417 
418 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
419 	(((struct qdf_nbuf_cb *) \
420 	((skb)->cb))->u.rx.tid_val)
421 
422 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
423 	(((struct qdf_nbuf_cb *) \
424 	((skb)->cb))->u.rx.flag_is_frag)
425 
426 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
427 	(((struct qdf_nbuf_cb *) \
428 	((skb)->cb))->u.rx.fcs_err)
429 
430 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
431 	qdf_nbuf_set_state(skb, PACKET_STATE)
432 
433 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
434 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
435 
436 #define QDF_NBUF_CB_TX_FTYPE(skb) \
437 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
438 
439 
440 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
441 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
442 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
443 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
444 
445 /* Tx Flags Accessor Macros*/
446 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
447 	(((struct qdf_nbuf_cb *) \
448 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
449 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
450 	(((struct qdf_nbuf_cb *) \
451 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
452 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
453 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
454 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
455 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
456 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
457 	(((struct qdf_nbuf_cb *) \
458 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
459 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
460 	(((struct qdf_nbuf_cb *) \
461 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
462 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
463 		(((struct qdf_nbuf_cb *) \
464 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
465 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
466 		(((struct qdf_nbuf_cb *) \
467 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
468 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
469 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
470 /* End of Tx Flags Accessor Macros */
471 
472 /* Tx trace accessor macros */
473 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
474 	(((struct qdf_nbuf_cb *) \
475 		((skb)->cb))->u.tx.trace.packet_state)
476 
477 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
478 	(((struct qdf_nbuf_cb *) \
479 		((skb)->cb))->u.tx.trace.is_packet_priv)
480 
481 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
482 	(((struct qdf_nbuf_cb *) \
483 		((skb)->cb))->u.tx.trace.packet_track)
484 
485 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
486 		(((struct qdf_nbuf_cb *) \
487 			((skb)->cb))->u.rx.trace.packet_track)
488 
489 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
490 	(((struct qdf_nbuf_cb *) \
491 		((skb)->cb))->u.tx.trace.proto_type)
492 
493 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
494 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
495 
496 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
497 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
498 
499 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
500 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
501 
502 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
503 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
504 
505 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
506 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
507 
508 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
509 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
510 
511 #define QDF_NBUF_CB_SET_BCAST(skb) \
512 	(((struct qdf_nbuf_cb *) \
513 		((skb)->cb))->u.tx.trace.is_bcast = true)
514 
515 #define QDF_NBUF_CB_SET_MCAST(skb) \
516 	(((struct qdf_nbuf_cb *) \
517 		((skb)->cb))->u.tx.trace.is_mcast = true)
518 /* End of Tx trace accessor macros */
519 
520 
521 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
522 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
523 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
524 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
525 
526 /* assume the OS provides a single fragment */
527 #define __qdf_nbuf_get_num_frags(skb)		   \
528 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
529 
530 #define __qdf_nbuf_reset_num_frags(skb) \
531 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
532 
533 /**
534  *   end of nbuf->cb access macros
535  */
536 
537 typedef void (*qdf_nbuf_trace_update_t)(char *);
538 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
539 
540 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
541 
542 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
543 	(QDF_NBUF_CB_PADDR(skb) = paddr)
544 
545 #define __qdf_nbuf_frag_push_head(					\
546 	skb, frag_len, frag_vaddr, frag_paddr)				\
547 	do {					\
548 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
549 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
550 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
551 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
552 	} while (0)
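/*
 * Illustrative sketch: a driver pushes its single extra tx fragment
 * (e.g. the HTC/HTT descriptor) ahead of the payload. hdr_len, hdr_vaddr
 * and hdr_paddr are hypothetical names for an already DMA-mapped
 * descriptor buffer:
 *
 *	__qdf_nbuf_frag_push_head(skb, hdr_len, hdr_vaddr, hdr_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now reports 2: descriptor + data
 */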
553 
554 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
555 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
556 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
557 
558 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
559 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
560 
561 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
562 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
563 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
564 	 /* assume that the OS only provides a single fragment */	\
565 	 QDF_NBUF_CB_PADDR(skb))
566 
567 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
568 
569 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
570 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
571 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
572 
573 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
574 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
575 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
576 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
577 
578 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
579 	do {								\
580 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
581 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
582 		if (frag_num)						\
583 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
584 							      is_wstrm; \
585 		else					\
586 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
587 							      is_wstrm; \
588 	} while (0)
589 
590 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
591 	do { \
592 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
593 	} while (0)
594 
595 #define __qdf_nbuf_get_vdev_ctx(skb) \
596 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
597 
598 #define __qdf_nbuf_set_tx_ftype(skb, type) \
599 	do { \
600 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
601 	} while (0)
602 
603 #define __qdf_nbuf_get_tx_ftype(skb) \
604 		 QDF_NBUF_CB_TX_FTYPE((skb))
605 
606 
607 #define __qdf_nbuf_set_rx_ftype(skb, type) \
608 	do { \
609 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
610 	} while (0)
611 
612 #define __qdf_nbuf_get_rx_ftype(skb) \
613 		 QDF_NBUF_CB_RX_FTYPE((skb))
614 
615 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
616 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
617 
618 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
619 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
620 
621 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
622 	do { \
623 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
624 	} while (0)
625 
626 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
627 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
628 
629 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
630 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
631 
632 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
633 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
634 
635 #define __qdf_nbuf_set_da_mcbc(skb, val) \
636 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
637 
638 #define __qdf_nbuf_is_da_mcbc(skb) \
639 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
640 
641 #define __qdf_nbuf_set_da_valid(skb, val) \
642 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
643 
644 #define __qdf_nbuf_is_da_valid(skb) \
645 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
646 
647 #define __qdf_nbuf_set_sa_valid(skb, val) \
648 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
649 
650 #define __qdf_nbuf_is_sa_valid(skb) \
651 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
652 
653 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
654 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
655 
656 #define __qdf_nbuf_is_rx_retry_flag(skb) \
657 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
658 
659 #define __qdf_nbuf_set_raw_frame(skb, val) \
660 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
661 
662 #define __qdf_nbuf_is_raw_frame(skb) \
663 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
664 
665 #define __qdf_nbuf_get_tid_val(skb) \
666 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
667 
668 #define __qdf_nbuf_set_tid_val(skb, val) \
669 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
670 
671 #define __qdf_nbuf_set_is_frag(skb, val) \
672 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
673 
674 #define __qdf_nbuf_is_frag(skb) \
675 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
676 
677 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
678 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
679 
680 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
681 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
682 
683 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
684 	do { \
685 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
686 	} while (0)
687 
688 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
689 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
690 
691 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
692 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
693 
694 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
695 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
696 
697 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
698 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
699 
700 #define __qdf_nbuf_trace_get_proto_type(skb) \
701 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
702 
703 #define __qdf_nbuf_data_attr_get(skb)		\
704 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
705 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
706 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
707 
708 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
709 		skb_queue_walk_safe(queue, var, tvar)
710 
711 /**
712  * __qdf_nbuf_num_frags_init() - init extra frags
713  * @skb: sk buffer
714  *
715  * Return: none
716  */
717 static inline
718 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
719 {
720 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
721 }
722 
723 /*
724  * prototypes. Implemented in qdf_nbuf.c
725  */
726 
727 /**
728  * __qdf_nbuf_alloc() - Allocate nbuf
729  * @osdev: Device handle
730  * @size: Netbuf requested size
731  * @reserve: headroom to start with
732  * @align: Align
733  * @prio: Priority
734  * @func: Function name of the call site
735  * @line: line number of the call site
736  *
737  * This allocates an nbuf, aligns it if needed, and reserves some space at
738  * the front. Since the reserve is done after alignment, an unaligned
739  * reserve value will result in an unaligned data address.
740  *
741  * Return: nbuf or %NULL if no memory
742  */
743 __qdf_nbuf_t
744 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
745 		 int prio, const char *func, uint32_t line);
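/*
 * Illustrative sketch (values are examples only): allocate a 2 KB buffer
 * with 4-byte alignment and a hypothetical RX_HDR_RESERVE bytes of
 * headroom:
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, RX_HDR_RESERVE,
 *					     4, 0, __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */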
746 
747 void __qdf_nbuf_free(struct sk_buff *skb);
748 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
749 			struct sk_buff *skb, qdf_dma_dir_t dir);
750 void __qdf_nbuf_unmap(__qdf_device_t osdev,
751 			struct sk_buff *skb, qdf_dma_dir_t dir);
752 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
753 				 struct sk_buff *skb, qdf_dma_dir_t dir);
754 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
755 			struct sk_buff *skb, qdf_dma_dir_t dir);
756 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
757 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
758 
759 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
760 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
761 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
762 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
763 	qdf_dma_dir_t dir, int nbytes);
764 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
765 	qdf_dma_dir_t dir, int nbytes);
766 
767 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
768 	qdf_dma_dir_t dir);
769 
770 QDF_STATUS __qdf_nbuf_map_nbytes_single(
771 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
772 void __qdf_nbuf_unmap_nbytes_single(
773 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
774 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
775 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
776 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
777 QDF_STATUS __qdf_nbuf_frag_map(
778 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
779 	int offset, qdf_dma_dir_t dir, int cur_frag);
780 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
781 
782 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
783 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
784 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
785 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
786 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
787 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
788 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
789 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
790 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
791 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
792 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
793 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
794 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
795 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
796 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
797 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
798 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
799 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
800 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
801 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
802 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
803 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
804 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
805 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
806 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
807 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
808 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
809 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
810 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
811 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
812 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
813 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
814 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
815 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
816 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
817 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
818 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
819 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
820 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
821 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
822 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
823 
824 #ifdef QDF_NBUF_GLOBAL_COUNT
825 int __qdf_nbuf_count_get(void);
826 void __qdf_nbuf_count_inc(struct sk_buff *skb);
827 void __qdf_nbuf_count_dec(struct sk_buff *skb);
828 void __qdf_nbuf_mod_init(void);
829 void __qdf_nbuf_mod_exit(void);
830 
831 #else
832 
833 static inline int __qdf_nbuf_count_get(void)
834 {
835 	return 0;
836 }
837 
838 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
839 {
840 	return;
841 }
842 
843 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
844 {
845 	return;
846 }
847 
848 static inline void __qdf_nbuf_mod_init(void)
849 {
850 	return;
851 }
852 
853 static inline void __qdf_nbuf_mod_exit(void)
854 {
855 	return;
856 }
857 #endif
858 
859 /**
860  * __qdf_to_status() - OS to QDF status conversion
861  * @error : OS error
862  *
863  * Return: QDF status
864  */
865 static inline QDF_STATUS __qdf_to_status(signed int error)
866 {
867 	switch (error) {
868 	case 0:
869 		return QDF_STATUS_SUCCESS;
870 	case ENOMEM:
871 	case -ENOMEM:
872 		return QDF_STATUS_E_NOMEM;
873 	default:
874 		return QDF_STATUS_E_NOSUPPORT;
875 	}
876 }
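/*
 * Illustrative sketch: propagate a kernel error code as a QDF status.
 *
 *	int rc = pskb_expand_head(skb, 0, 64, GFP_ATOMIC);
 *	if (rc)
 *		return __qdf_to_status(rc); // -ENOMEM -> QDF_STATUS_E_NOMEM
 */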
877 
878 /**
879  * __qdf_nbuf_len() - return the amount of valid data in the skb
880  * @skb: Pointer to network buffer
881  *
882  * This API returns the amount of valid data in the skb. If a driver-added
883  * extra fragment is present, its length is included in the total.
884  *
885  * Return: network buffer length
886  */
887 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
888 {
889 	int i, extra_frag_len = 0;
890 
891 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
892 	if (i > 0)
893 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
894 
895 	return extra_frag_len + skb->len;
896 }
897 
898 /**
899  * __qdf_nbuf_cat() - link two nbufs
900  * @dst: Buffer to piggyback into
901  * @src: Buffer to put
902  *
903  * Concat two nbufs: the new buf (src) is piggybacked onto the older one.
904  * It is the caller's responsibility to free the src skb, whether the
905  * call succeeds or fails.
906  *
907  * Return: QDF_STATUS of the call
908  */
909 static inline QDF_STATUS
910 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
911 {
912 	int error = 0;
913 
914 	qdf_assert(dst && src);
915 
916 	/*
917 	 * Since pskb_expand_head unconditionally reallocates the skb->head
918 	 * buffer, first check whether the current buffer is already large
919 	 * enough.
920 	 */
921 	if (skb_tailroom(dst) < src->len) {
922 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
923 		if (error)
924 			return __qdf_to_status(error);
925 	}
926 
927 	memcpy(skb_tail_pointer(dst), src->data, src->len);
928 	skb_put(dst, src->len);
929 	return __qdf_to_status(error);
930 }
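/*
 * Illustrative sketch: concatenate src into dst; src is not consumed,
 * so the caller frees it once the copy has succeeded:
 *
 *	if (QDF_IS_STATUS_SUCCESS(__qdf_nbuf_cat(dst, src)))
 *		__qdf_nbuf_free(src);
 */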
931 
932 /*
933  * nbuf manipulation routines
934  */
935 /**
936  * __qdf_nbuf_headroom() - return the amount of headroom available
937  * @skb: Pointer to network buffer
938  *
939  * Return: amount of headroom
940  */
941 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
942 {
943 	return skb_headroom(skb);
944 }
945 
946 /**
947  * __qdf_nbuf_tailroom() - return the amount of tail space available
948  * @skb: Pointer to network buffer
949  *
950  * Return: amount of tail room
951  */
952 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
953 {
954 	return skb_tailroom(skb);
955 }
956 
957 /**
958  * __qdf_nbuf_put_tail() - Puts data at the end
959  * @skb: Pointer to network buffer
960  * @size: size to be pushed
961  *
962  * Return: data pointer of this buf where new data has to be
963  *         put, or NULL if there is not enough room in this buf.
964  */
965 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
966 {
967 	if (skb_tailroom(skb) < size) {
968 		if (unlikely(pskb_expand_head(skb, 0,
969 			size - skb_tailroom(skb), GFP_ATOMIC))) {
970 			dev_kfree_skb_any(skb);
971 			return NULL;
972 		}
973 	}
974 	return skb_put(skb, size);
975 }
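/*
 * Illustrative sketch: append a 4-byte trailer; "trailer" is a
 * hypothetical source buffer. On failure the skb has already been
 * freed, so a NULL return must not be followed by further use:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *	if (tail)
 *		memcpy(tail, trailer, 4);
 */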
976 
977 /**
978  * __qdf_nbuf_trim_tail() - trim data out from the end
979  * @skb: Pointer to network buffer
980  * @size: size to be popped
981  *
982  * Return: none
983  */
984 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
985 {
986 	skb_trim(skb, skb->len - size);
987 }
988 
989 
990 /*
991  * prototypes. Implemented in qdf_nbuf.c
992  */
993 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
994 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
995 				qdf_nbuf_rx_cksum_t *cksum);
996 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
997 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
998 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
999 void __qdf_nbuf_ref(struct sk_buff *skb);
1000 int __qdf_nbuf_shared(struct sk_buff *skb);
1001 
1002 /*
1003  * qdf_nbuf_pool_delete() implementation - do nothing in linux
1004  */
1005 #define __qdf_nbuf_pool_delete(osdev)
1006 
1007 /**
1008  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
1009  * @skb: Pointer to network buffer
1010  *
1011  * If GFP_ATOMIC is overkill, the caller could check whether the clone is
1012  * requested from interrupt context (for example via
1013  * "in_irq() || irqs_disabled()") and use GFP_KERNEL in the normal,
1014  * non-atomic case.
1016  *
1017  * Return: cloned skb
1018  */
1019 static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
1020 {
1021 	struct sk_buff *skb_new = NULL;
1022 
1023 	skb_new = skb_clone(skb, GFP_ATOMIC);
1024 	if (skb_new)
1025 		__qdf_nbuf_count_inc(skb_new);
1026 
1027 	return skb_new;
1028 }
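/*
 * Illustrative sketch of the GFP selection suggested above (a possible
 * variant, not what __qdf_nbuf_clone() currently does):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ? GFP_ATOMIC
 *						    : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */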
1029 
1030 /**
1031  * __qdf_nbuf_copy() - returns a private copy of the skb
1032  * @skb: Pointer to network buffer
1033  *
1034  * This API returns a private copy of the skb; the returned skb is
1035  * completely modifiable by the caller.
1036  *
1037  * Return: skb or NULL
1038  */
1039 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1040 {
1041 	struct sk_buff *skb_new = NULL;
1042 
1043 	skb_new = skb_copy(skb, GFP_ATOMIC);
1044 	if (skb_new)
1045 		__qdf_nbuf_count_inc(skb_new);
1046 
1047 	return skb_new;
1048 }
1049 
1050 #define __qdf_nbuf_reserve      skb_reserve
1051 
1052 /**
1053  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1054  * @skb: Pointer to network buffer
1055  * @data: data pointer
1056  *
1057  * Return: none
1058  */
1059 static inline void
1060 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1061 {
1062 	skb->data = data;
1063 }
1064 
1065 /**
1066  * __qdf_nbuf_set_len() - set buffer data length
1067  * @skb: Pointer to network buffer
1068  * @len: data length
1069  *
1070  * Return: none
1071  */
1072 static inline void
1073 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1074 {
1075 	skb->len = len;
1076 }
1077 
1078 /**
1079  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1080  * @skb: Pointer to network buffer
1081  * @len: skb data length
1082  *
1083  * Return: none
1084  */
1085 static inline void
1086 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1087 {
1088 	skb_set_tail_pointer(skb, len);
1089 }
1090 
1091 /**
1092  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1093  * @skb: Pointer to network buffer
1094  * @list: list to use
1095  *
1096  * This is a lockless version, driver must acquire locks if it
1097  * needs to synchronize
1098  *
1099  * Return: none
1100  */
1101 static inline void
1102 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1103 {
1104 	__skb_unlink(skb, list);
1105 }
1106 
1107 /**
1108  * __qdf_nbuf_reset() - reset the buffer data and pointer
1109  * @buf: Network buf instance
1110  * @reserve: reserve
1111  * @align: align
1112  *
1113  * Return: none
1114  */
1115 static inline void
1116 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1117 {
1118 	int offset;
1119 
1120 	skb_push(skb, skb_headroom(skb));
1121 	skb_put(skb, skb_tailroom(skb));
1122 	memset(skb->data, 0x0, skb->len);
1123 	skb_trim(skb, 0);
1124 	skb_reserve(skb, NET_SKB_PAD);
1125 	memset(skb->cb, 0x0, sizeof(skb->cb));
1126 
1127 	/*
1128 	 * The default is for netbuf fragments to be interpreted
1129 	 * as wordstreams rather than bytestreams.
1130 	 */
1131 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1132 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1133 
1134 	/*
1135 	 * Align & make sure that the tail & data are adjusted properly
1136 	 */
1137 
1138 	if (align) {
1139 		offset = ((unsigned long)skb->data) % align;
1140 		if (offset)
1141 			skb_reserve(skb, align - offset);
1142 	}
1143 
1144 	skb_reserve(skb, reserve);
1145 }
1146 
1147 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1148 /**
1149  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
1150  *                                         buffer in kernel
1151  *
1152  * Return: true if dev_scratch is supported
1153  *         false if dev_scratch is not supported
1154  */
1155 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1156 {
1157 	return true;
1158 }
1159 
1160 /**
1161  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1162  * @skb: Pointer to network buffer
1163  *
1164  * Return: dev_scratch if dev_scratch supported
1165  *         0 if dev_scratch not supported
1166  */
1167 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1168 {
1169 	return skb->dev_scratch;
1170 }
1171 
1172 /**
1173  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1174  * @skb: Pointer to network buffer
1175  * @value: value to be set in dev_scratch of network buffer
1176  *
1177  * Return: void
1178  */
1179 static inline void
1180 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1181 {
1182 	skb->dev_scratch = value;
1183 }
1184 #else
1185 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1186 {
1187 	return false;
1188 }
1189 
1190 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1191 {
1192 	return 0;
1193 }
1194 
1195 static inline void
1196 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1197 {
1198 }
1199 #endif /* KERNEL_VERSION(4, 14, 0) */
1200 
1201 /**
1202  * __qdf_nbuf_head() - return the pointer to the skb's head
1203  * @skb: Pointer to network buffer
1204  *
1205  * Return: Pointer to head buffer
1206  */
1207 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1208 {
1209 	return skb->head;
1210 }
1211 
1212 /**
1213  * __qdf_nbuf_data() - return the pointer to data header in the skb
1214  * @skb: Pointer to network buffer
1215  *
1216  * Return: Pointer to skb data
1217  */
1218 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1219 {
1220 	return skb->data;
1221 }
1222 
1223 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1224 {
1225 	return (uint8_t *)&skb->data;
1226 }
1227 
1228 /**
1229  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1230  * @skb: Pointer to network buffer
1231  *
1232  * Return: skb protocol
1233  */
1234 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1235 {
1236 	return skb->protocol;
1237 }
1238 
1239 /**
1240  * __qdf_nbuf_get_ip_summed() - return the ip_summed status of the skb
1241  * @skb: Pointer to network buffer
1242  *
1243  * Return: skb ip_summed
1244  */
1245 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1246 {
1247 	return skb->ip_summed;
1248 }
1249 
1250 /**
1251  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1252  * @skb: Pointer to network buffer
1253  * @ip_summed: ip checksum
1254  *
1255  * Return: none
1256  */
1257 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1258 		 uint8_t ip_summed)
1259 {
1260 	skb->ip_summed = ip_summed;
1261 }
1262 
1263 /**
1264  * __qdf_nbuf_get_priority() - return the priority value of the skb
1265  * @skb: Pointer to network buffer
1266  *
1267  * Return: skb priority
1268  */
1269 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1270 {
1271 	return skb->priority;
1272 }
1273 
1274 /**
1275  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1276  * @skb: Pointer to network buffer
1277  * @p: priority
1278  *
1279  * Return: none
1280  */
1281 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1282 {
1283 	skb->priority = p;
1284 }
1285 
1286 /**
1287  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1288  * @skb: Current skb
1289  * @skb_next: Next skb
1290  *
1291  * Return: void
1292  */
1293 static inline void
1294 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1295 {
1296 	skb->next = skb_next;
1297 }
1298 
1299 /**
1300  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1301  * @skb: Current skb
1302  *
1303  * Return: the next skb pointed to by the current skb
1304  */
1305 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1306 {
1307 	return skb->next;
1308 }
1309 
1310 /**
1311  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1312  * @skb: Current skb
1313  * @skb_next: Next skb
1314  *
1315  * This fn is used to link up extensions to the head skb. Does not handle
1316  * linking to the head
1317  *
1318  * Return: none
1319  */
1320 static inline void
1321 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1322 {
1323 	skb->next = skb_next;
1324 }
1325 
1326 /**
1327  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1328  * @skb: Current skb
1329  *
1330  * Return: the next skb pointed to by the current skb
1331  */
1332 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1333 {
1334 	return skb->next;
1335 }
1336 
1337 /**
1338  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1339  * @skb_head: head_buf nbuf holding head segment (single)
1340  * @ext_list: nbuf list holding linked extensions to the head
1341  * @ext_len: Total length of all buffers in the extension list
1342  *
1343  * This function is used to link up a list of packet extensions (seg1, seg2, ...)
1344  * to the nbuf holding the head segment (seg0)
1345  *
1346  * Return: none
1347  */
1348 static inline void
1349 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1350 			struct sk_buff *ext_list, size_t ext_len)
1351 {
1352 	skb_shinfo(skb_head)->frag_list = ext_list;
1353 	skb_head->data_len = ext_len;
1354 	skb_head->len += skb_head->data_len;
1355 }
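/*
 * Illustrative sketch: stitch two extension segments onto a head segment,
 * e.g. for a jumbo/A-MSDU frame. seg1 and seg2 are hypothetical nbufs
 * already filled with data:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(head, seg1, seg1->len + seg2->len);
 */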
1356 
1357 /**
1358  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1359  * @head_buf: Network buf holding head segment (single)
1360  *
1361  * This ext_list is populated for jumbo packets, for example in case of
1362  * monitor mode A-MSDU reception, and segments are stitched via frag_list.
1363  *
1364  * Return: Network buf list holding linked extensions from head buf.
1365  */
1366 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1367 {
1368 	return (skb_shinfo(head_buf)->frag_list);
1369 }
1370 
1371 /**
1372  * __qdf_nbuf_get_age() - return the checksum value of the skb
1373  * @skb: Pointer to network buffer
1374  *
1375  * Return: checksum value
1376  */
1377 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1378 {
1379 	return skb->csum;
1380 }
1381 
1382 /**
1383  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1384  * @skb: Pointer to network buffer
1385  * @v: Value
1386  *
1387  * Return: none
1388  */
1389 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1390 {
1391 	skb->csum = v;
1392 }
1393 
1394 /**
1395  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1396  * @skb: Pointer to network buffer
1397  * @adj: Adjustment value
1398  *
1399  * Return: none
1400  */
1401 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1402 {
1403 	skb->csum -= adj;
1404 }
1405 
1406 /**
1407  * __qdf_nbuf_copy_bits() - copy bits from skb to a destination buffer
1408  * @skb: Pointer to network buffer
1409  * @offset: Offset value
1410  * @len: Length
1411  * @to: Destination pointer
1412  *
1413  * Return: 0 on success or a negative error code on failure
1414  */
1415 static inline int32_t
1416 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1417 {
1418 	return skb_copy_bits(skb, offset, to, len);
1419 }
1420 
1421 /**
1422  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1423  * @skb: Pointer to network buffer
1424  * @len:  Packet length
1425  *
1426  * Return: none
1427  */
1428 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1429 {
1430 	if (skb->len > len) {
1431 		skb_trim(skb, len);
1432 	} else {
1433 		if (skb_tailroom(skb) < len - skb->len) {
1434 			if (unlikely(pskb_expand_head(skb, 0,
1435 				len - skb->len - skb_tailroom(skb),
1436 				GFP_ATOMIC))) {
1437 				dev_kfree_skb_any(skb);
1438 				qdf_assert(0);
				return; /* skb was freed; avoid use-after-free */
1439 			}
1440 		}
1441 		skb_put(skb, (len - skb->len));
1442 	}
1443 }
1444 
1445 /**
1446  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1447  * @skb: Pointer to network buffer
1448  * @protocol: Protocol type
1449  *
1450  * Return: none
1451  */
1452 static inline void
1453 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1454 {
1455 	skb->protocol = protocol;
1456 }
1457 
1458 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1459 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1460 
1461 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1462 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1463 
1464 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1465 				      uint32_t *lo, uint32_t *hi);
1466 
1467 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1468 	struct qdf_tso_info_t *tso_info);
1469 
1470 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1471 			  struct qdf_tso_seg_elem_t *tso_seg,
1472 			  bool is_last_seg);
1473 
1474 #ifdef FEATURE_TSO
1475 /**
1476  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1477  *                                    payload len
1478  * @skb: buffer
1479  *
1480  * Return: size
1481  */
1482 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1483 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1484 
1485 #else
1486 static inline
1487 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
1488 {
1489 	return 0;
1490 }
1491 
1492 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1493 {
1494 	return 0;
1495 }
1496 
1497 #endif /* FEATURE_TSO */
1498 
1499 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1500 {
1501 	if (skb_is_gso(skb) &&
1502 		(skb_is_gso_v6(skb) ||
1503 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1504 		return true;
1505 	else
1506 		return false;
1507 }
1508 
1509 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1510 
1511 int __qdf_nbuf_get_users(struct sk_buff *skb);
1512 
1513 /**
1514  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1515  *			      and get hw_classify by peeking
1516  *			      into packet
1517  * @nbuf:		Network buffer (skb on Linux)
1518  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1519  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1520  *			needs to be set in case of CE classification support
1521  *			Is set by this macro.
1522  * @hw_classify:	This is a flag which is set to indicate
1523  *			CE classification is enabled.
1524  *			Do not set this bit for VLAN packets
1525  *			OR for mcast / bcast frames.
1526  *
1527  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1528  * whether to enable tx_classify bit in CE.
1529  *
1530  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1531  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1532  * the field is a length and the frame is 802.3; else it is Ethernet Type II
1533  * (RFC 894).
1534  * Bit 4 in pkt_subtype is the tx_classify bit
1535  *
1536  * Return:	void
1537  */
1538 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1539 				pkt_subtype, hw_classify)	\
1540 do {								\
1541 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1542 	uint16_t ether_type = ntohs(eh->h_proto);		\
1543 	bool is_mc_bc;						\
1544 								\
1545 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1546 		   is_multicast_ether_addr((uint8_t *)eh);	\
1547 								\
1548 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1549 		hw_classify = 1;				\
1550 		pkt_subtype = 0x01 <<				\
1551 			HTT_TX_CLASSIFY_BIT_S;			\
1552 	}							\
1553 								\
1554 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1555 		pkt_type = htt_pkt_type_ethernet;		\
1556 								\
1557 } while (0)
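/*
 * Illustrative sketch (htt_pkt_type_eth2 and the HTT defines used by the
 * macro come from the HTT layer, not this header):
 *
 *	uint8_t pkt_type = htt_pkt_type_eth2;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */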
1558 
1559 /**
1560  * nbuf private buffer routines
1561  */
1562 
1563 /**
1564  * __qdf_nbuf_peek_header() - return the header's addr & len
1565  * @skb: Pointer to network buffer
1566  * @addr: Pointer to store header's addr
1567  * @len: Pointer to store network buffer length
1568  *
1569  * Return: none
1570  */
1571 static inline void
1572 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1573 {
1574 	*addr = skb->data;
1575 	*len = skb->len;
1576 }
1577 
1578 /**
1579  * typedef struct __qdf_nbuf_queue_t -  network buffer queue
1580  * @head: Head pointer
1581  * @tail: Tail pointer
1582  * @qlen: Queue length
1583  */
1584 typedef struct __qdf_nbuf_qhead {
1585 	struct sk_buff *head;
1586 	struct sk_buff *tail;
1587 	unsigned int qlen;
1588 } __qdf_nbuf_queue_t;
1589 
1590 /******************Functions *************/
1591 
1592 /**
1593  * __qdf_nbuf_queue_init() - initialize the queue head
1594  * @qhead: Queue head
1595  *
1596  * Return: QDF status
1597  */
1598 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1599 {
1600 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1601 	return QDF_STATUS_SUCCESS;
1602 }
1603 
1604 /**
1605  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1606  * @qhead: Queue head
1607  * @skb: Pointer to network buffer
1608  *
1609  * This is a lockless version, driver must acquire locks if it
1610  * needs to synchronize
1611  *
1612  * Return: none
1613  */
1614 static inline void
1615 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1616 {
1617 	skb->next = NULL;       /* Nullify the next ptr */
1618 
1619 	if (!qhead->head)
1620 		qhead->head = skb;
1621 	else
1622 		qhead->tail->next = skb;
1623 
1624 	qhead->tail = skb;
1625 	qhead->qlen++;
1626 }
1627 
1628 /**
1629  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1630  * @dest: target netbuf queue
1631  * @src:  source netbuf queue
1632  *
1633  * Return: target netbuf queue
1634  */
1635 static inline __qdf_nbuf_queue_t *
1636 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1637 {
1638 	if (!dest)
1639 		return NULL;
1640 	else if (!src || !(src->head))
1641 		return dest;
1642 
1643 	if (!(dest->head))
1644 		dest->head = src->head;
1645 	else
1646 		dest->tail->next = src->head;
1647 
1648 	dest->tail = src->tail;
1649 	dest->qlen += src->qlen;
1650 	return dest;
1651 }
1652 
1653 /**
1654  * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1655  * @qhead: Queue head
1656  * @skb: Pointer to network buffer
1657  *
1658  * This is a lockless version, driver must acquire locks if it needs to
1659  * synchronize
1660  *
1661  * Return: none
1662  */
1663 static inline void
1664 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1665 {
1666 	if (!qhead->head) {
1667 		/* Empty queue: tail pointer must be updated */
1668 		qhead->tail = skb;
1669 	}
1670 	skb->next = qhead->head;
1671 	qhead->head = skb;
1672 	qhead->qlen++;
1673 }
1674 
1675 /**
1676  * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
1677  * @qhead: Queue head
1678  *
1679  * This is a lockless version. Driver should take care of the locks
1680  *
1681  * Return: skb or NULL
1682  */
1683 static inline
1684 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1685 {
1686 	__qdf_nbuf_t tmp = NULL;
1687 
1688 	if (qhead->head) {
1689 		qhead->qlen--;
1690 		tmp = qhead->head;
1691 		if (qhead->head == qhead->tail) {
1692 			qhead->head = NULL;
1693 			qhead->tail = NULL;
1694 		} else {
1695 			qhead->head = tmp->next;
1696 		}
1697 		tmp->next = NULL;
1698 	}
1699 	return tmp;
1700 }
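/*
 * Illustrative sketch: minimal use of the lockless queue helpers (any
 * needed locking is the caller's responsibility):
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		__qdf_nbuf_free(skb);
 */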
1701 
1702 /**
1703  * __qdf_nbuf_queue_free() - free a queue
1704  * @qhead: head of queue
1705  *
1706  * Return: QDF status
1707  */
1708 static inline QDF_STATUS
1709 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1710 {
1711 	__qdf_nbuf_t  buf = NULL;
1712 
1713 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1714 		__qdf_nbuf_free(buf);
1715 	return QDF_STATUS_SUCCESS;
1716 }
1717 
1718 
1719 /**
1720  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1721  * @qhead: head of queue
1722  *
1723  * Return: NULL if the queue is empty
1724  */
1725 static inline struct sk_buff *
1726 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1727 {
1728 	return qhead->head;
1729 }
1730 
1731 /**
1732  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1733  * @qhead: head of queue
1734  *
1735  * Return: NULL if the queue is empty
1736  */
1737 static inline struct sk_buff *
1738 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1739 {
1740 	return qhead->tail;
1741 }
1742 
1743 /**
1744  * __qdf_nbuf_queue_len() - return the queue length
1745  * @qhead: Queue head
1746  *
1747  * Return: Queue length
1748  */
1749 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1750 {
1751 	return qhead->qlen;
1752 }
1753 
1754 /**
1755  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1756  * @skb: Pointer to network buffer
1757  *
1758  * This API returns the next skb in the packet chain; note that the skb
1759  * is still left in the queue
1760  *
1761  * Return: next skb, or NULL if there are no more packets
1762  */
1763 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1764 {
1765 	return skb->next;
1766 }
1767 
1768 /**
1769  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1770  * @qhead: Queue head
1771  *
1772  * Return: true if length is 0 else false
1773  */
1774 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1775 {
1776 	return qhead->qlen == 0;
1777 }
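
/*
 * Iteration sketch (illustrative; count_queue_bytes() is hypothetical):
 * the peek helpers above walk the chain without dequeuing anything, so
 * the caller must hold whatever lock protects the queue.
 *
 *	static uint32_t count_queue_bytes(__qdf_nbuf_queue_t *qhead)
 *	{
 *		struct sk_buff *skb = __qdf_nbuf_queue_first(qhead);
 *		uint32_t bytes = 0;
 *
 *		while (skb) {
 *			bytes += skb->len;
 *			skb = __qdf_nbuf_queue_next(skb);
 *		}
 *
 *		return bytes;
 *	}
 */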
1778 
1779 /*
1780  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1781  * Because the queue head will most likely be put in some structure,
1782  * we don't use a pointer type as the definition.
1783  */
1790 
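/**
 * __qdf_nbuf_set_send_complete_flag() - set send complete flag (no-op)
 * @skb: sk buff
 * @flag: flag value
 *
 * This is a no-op in the Linux implementation.
 *
 * Return: none
 */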
1791 static inline void
1792 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1793 {
1794 }
1795 
1796  * __qdf_nbuf_realloc_headroom() - expand the headroom in the data
1797  *        region while keeping the skb shell intact.
1798  *        On failure the skb is released and NULL is returned.
1799  *        in the data region. In case of failure the skb is released.
1800  * @skb: sk buff
1801  * @headroom: size of headroom
1802  *
1803  * Return: skb or NULL
1804  */
1805 static inline struct sk_buff *
1806 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1807 {
1808 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1809 		dev_kfree_skb_any(skb);
1810 		skb = NULL;
1811 	}
1812 	return skb;
1813 }
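
/*
 * Caller pattern (illustrative; hdr_len is a hypothetical local): on
 * failure the old skb has already been freed, so the caller must adopt
 * the returned pointer and must not touch the original on NULL.
 *
 *	skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */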
1814 
1815 /**
1816  * __qdf_nbuf_realloc_tailroom() - expand the tailroom in the data
1817  *        region while keeping the skb shell intact.
1818  *        On failure the skb is released and NULL is returned.
1819  * @skb: sk buff
1820  * @tailroom: size of tailroom
1821  *
1822  * Return: skb or NULL
1823  */
1824 static inline struct sk_buff *
1825 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1826 {
1827 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1828 		return skb;
1829 	/*
1830 	 * unlikely path: expansion failed, so free the skb
1831 	 */
1832 	dev_kfree_skb_any(skb);
1833 	return NULL;
1834 }
1835 
1836 /**
1837  * __qdf_nbuf_linearize() - skb linearize
1838  * @skb: sk buff
1839  *
1840  * If the nbuf is non-linear, this function linearizes it,
1841  * i.e. it coalesces all of its fragments into a single
1842  * contiguous data region so that the contents can be
1843  * accessed as one flat buffer. A linear nbuf is left
1844  * unchanged.
1845  *
1846  * Return: 0 on success, -ENOMEM on failure
1847  */
1848 static inline int
1849 __qdf_nbuf_linearize(struct sk_buff *skb)
1850 {
1851 	return skb_linearize(skb);
1852 }
1853 
1854 /**
1855  * __qdf_nbuf_unshare() - skb unshare
1856  * @skb: sk buff
1857  *
1858  * Create a version of the specified nbuf whose contents
1859  * can be safely modified without affecting other
1860  * users. If the nbuf is a clone, this function
1861  * creates a new copy of the data. If the buffer is not
1862  * a clone, the original buffer is returned.
1863  *
1864  * Return: skb or NULL
1865  */
1866 static inline struct sk_buff *
1867 __qdf_nbuf_unshare(struct sk_buff *skb)
1868 {
1869 	return skb_unshare(skb, GFP_ATOMIC);
1870 }
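
/*
 * Copy-on-write sketch (illustrative): before modifying payload that may
 * be shared with the network stack, unshare first and adopt the returned
 * pointer, which may differ from the original skb.
 *
 *	skb = __qdf_nbuf_unshare(skb);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	// skb data is now exclusively owned and safe to modify
 */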
1871 
1872 /**
1873  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1874  * @skb: sk buff
1875  *
1876  * Return: true/false
1877  */
1878 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1879 {
1880 	return skb_cloned(skb);
1881 }
1882 
1883 /**
1884  * __qdf_nbuf_pool_init() - init pool
1885  * @net: net handle
1886  *
1887  * Return: QDF status
1888  */
1889 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1890 {
1891 	return QDF_STATUS_SUCCESS;
1892 }
1893 
1894 /*
1895  * __qdf_nbuf_pool_delete() implementation - do nothing on Linux
1896  */
1897 #define __qdf_nbuf_pool_delete(osdev)
1898 
1899 /**
1900  * __qdf_nbuf_expand() - expand both the headroom and the tailroom.
1901  *        On failure the skb is released and NULL is returned.
1902  * @skb: sk buff
1903  * @headroom: size of headroom
1904  * @tailroom: size of tailroom
1905  *
1906  * Return: skb or NULL
1907  */
1908 static inline struct sk_buff *
1909 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1910 {
1911 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1912 		return skb;
1913 
1914 	dev_kfree_skb_any(skb);
1915 	return NULL;
1916 }
1917 
1918 /**
1919  * __qdf_nbuf_tx_cksum_info() - tx checksum info
1920  * @skb: sk buff, @hdr_off: checksum header offset, @where: checksum location
1921  * Return: true/false (not implemented; asserts and returns false)
1922  */
1923 static inline bool
1924 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1925 			 uint8_t **where)
1926 {
1927 	qdf_assert(0);
1928 	return false;
1929 }
1930 
1931 /**
1932  * __qdf_nbuf_reset_ctxt() - mem zero control block
1933  * @nbuf: buffer
1934  *
1935  * Return: none
1936  */
1937 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1938 {
1939 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1940 }
1941 
1942 /**
1943  * __qdf_nbuf_network_header() - get network header
1944  * @buf: buffer
1945  *
1946  * Return: network header pointer
1947  */
1948 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1949 {
1950 	return skb_network_header(buf);
1951 }
1952 
1953 /**
1954  * __qdf_nbuf_transport_header() - get transport header
1955  * @buf: buffer
1956  *
1957  * Return: transport header pointer
1958  */
1959 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1960 {
1961 	return skb_transport_header(buf);
1962 }
1963 
1964 /**
1965  * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
1966  * passed as part of the network buffer by the network stack
1967  * @skb: sk buff
1968  *
1969  * Return: TCP MSS size
1970  *
1971  */
1972 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1973 {
1974 	return skb_shinfo(skb)->gso_size;
1975 }
1976 
1977 /**
1978  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1979  * @nbuf: sk buff
1980  *
1981  * Return: none
1982  */
1983 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1984 
1985 /**
1986  * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1987  * @nbuf: sk buff
1988  *
1989  * Return: void ptr
1990  */
1991 static inline void *
1992 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
1993 {
1994 	return (void *)nbuf->cb;
1995 }
1996 
1997 /**
1998  * __qdf_nbuf_headlen() - return the length of the linear buffer of the skb
1999  * @skb: sk buff
2000  *
2001  * Return: head size
2002  */
2003 static inline size_t
2004 __qdf_nbuf_headlen(struct sk_buff *skb)
2005 {
2006 	return skb_headlen(skb);
2007 }
2008 
2009 /**
2010  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
2011  * @skb: sk buff
2012  *
2013  * Return: number of fragments
2014  */
2015 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
2016 {
2017 	return skb_shinfo(skb)->nr_frags;
2018 }
2019 
2020 /**
2021  * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
2022  * @skb: sk buff
2023  *
2024  * Return: true/false
2025  */
2026 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2027 {
2028 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2029 }
2030 
2031 /**
2032  * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
2033  * @skb: sk buff
2034  *
2035  * Return: true/false
2036  */
2037 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2038 {
2039 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2040 }
2041 
2042 /**
2043  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2044  * @skb: sk buff
2045  *
2046  * Return: size of l2+l3+l4 header length
2047  */
2048 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2049 {
2050 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2051 }
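
/*
 * TSO sketch (illustrative; hdr_len and seg_len are hypothetical locals):
 * for a TSO packet, each transmitted segment carries the full l2+l3+l4
 * header plus up to MSS bytes of payload.
 *
 *	if (__qdf_nbuf_tso_tcp_v4(skb) || __qdf_nbuf_tso_tcp_v6(skb)) {
 *		size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
 *		// wire size of one maximum-sized segment
 *		size_t seg_len = hdr_len + __qdf_nbuf_tcp_tso_size(skb);
 *	}
 */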
2052 
2053 /**
2054  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2055  * @skb: sk buff
2056  *
2057  * Return:  true/false
2058  */
2059 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2060 {
2061 	if (skb_is_nonlinear(skb))
2062 		return true;
2063 	else
2064 		return false;
2065 }
2066 
2067 /**
2068  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
2069  * @skb: sk buff
2070  *
2071  * Return: TCP sequence number
2072  */
2073 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2074 {
2075 	return ntohl(tcp_hdr(skb)->seq);
2076 }
2077 
2078 /**
2079  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
2080  * @skb: sk buff
2081  *
2082  * Return: data pointer to typecast into your priv structure
2083  */
2084 static inline uint8_t *
2085 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2086 {
2087 	return &skb->cb[8];
2088 }
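
/*
 * Typecast sketch (illustrative; struct my_priv is hypothetical): the
 * returned pointer is meant to be cast to a driver-private structure,
 * which must fit within the remaining bytes of skb->cb.
 *
 *	struct my_priv *priv =
 *			(struct my_priv *)__qdf_nbuf_get_priv_ptr(skb);
 */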
2089 
2090 /**
2091  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2092  * @buf: Pointer to nbuf
2093  *
2094  * Return: None
2095  */
2096 static inline void
2097 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2098 {
2099 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2100 }
2101 
2102 /**
2103  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2104  *
2105  * @skb: sk buff
2106  * @queue_id: Queue id
2107  *
2108  * Return: void
2109  */
2110 static inline void
2111 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2112 {
2113 	skb_record_rx_queue(skb, queue_id);
2114 }
2115 
2116 /**
2117  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2118  *
2119  * @skb: sk buff
2120  *
2121  * Return: Queue mapping
2122  */
2123 static inline uint16_t
2124 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2125 {
2126 	return skb->queue_mapping;
2127 }
2128 
2129 /**
2130  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2131  *
2132  * @skb: sk buff
2133  *
2134  * Return: void
2135  */
2136 static inline void
2137 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2138 {
2139 	__net_timestamp(skb);
2140 }
2141 
2142 /**
2143  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2144  *
2145  * @skb: sk buff
2146  *
2147  * Return: timestamp stored in skb in ms
2148  */
2149 static inline uint64_t
2150 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2151 {
2152 	return ktime_to_ms(skb_get_ktime(skb));
2153 }
2154 
2155 /**
2156  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2157  *
2158  * @skb: sk buff
2159  *
2160  * Return: time difference in ms
2161  */
2162 static inline uint64_t
2163 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2164 {
2165 	return ktime_to_ms(net_timedelta(skb->tstamp));
2166 }
2167 
2168 /**
2169  * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
2170  *
2171  * @skb: sk buff
2172  *
2173  * Return: time difference in microseconds
2174  */
2175 static inline uint64_t
2176 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2177 {
2178 	return ktime_to_us(net_timedelta(skb->tstamp));
2179 }
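
/*
 * Latency sketch (illustrative; delay_us is a hypothetical local): stamp
 * the skb when it enters the driver and read the elapsed time when it is
 * finally consumed.
 *
 *	__qdf_nbuf_set_timestamp(skb);
 *	...
 *	delay_us = __qdf_nbuf_get_timedelta_us(skb);
 */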
2180 
2181 /**
2182  * __qdf_nbuf_orphan() - orphan a nbuf
2183  * @skb: sk buff
2184  *
2185  * If a buffer currently has an owner then we call the
2186  * owner's destructor function
2187  *
2188  * Return: void
2189  */
2190 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2191 {
2192 	skb_orphan(skb);
2193 }
2194 
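/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list from which the skb is dequeued
 *
 * Return: dequeued skb, or NULL if the list is empty
 */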
2195 static inline struct sk_buff *
2196 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
2197 {
2198 	return skb_dequeue(skb_queue_head);
2199 }
2200 
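/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: number of skbs on the list
 */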
2201 static inline
2202 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
2203 {
2204 	return skb_queue_head->qlen;
2205 }
2206 
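/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of the list
 * @skb_queue_head: skb list on which the skb is enqueued
 * @skb: skb to enqueue
 *
 * Return: none
 */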
2207 static inline
2208 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
2209 					struct sk_buff *skb)
2210 {
2211 	skb_queue_tail(skb_queue_head, skb);
2212 }
2213 
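/**
 * __qdf_nbuf_queue_head_init() - initialize an skb list and its lock
 * @skb_queue_head: skb list to initialize
 *
 * Return: none
 */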
2214 static inline
2215 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
2216 {
2217 	skb_queue_head_init(skb_queue_head);
2218 }
2219 
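/**
 * __qdf_nbuf_queue_head_purge() - free all skbs on the list, leaving it empty
 * @skb_queue_head: skb list to purge
 *
 * Return: none
 */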
2220 static inline
2221 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2222 {
2223 	skb_queue_purge(skb_queue_head);
2224 }
2225 
2226 /**
2227  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
2228  * @skb_queue_head: skb list for which the lock is to be acquired
2229  *
2230  * Return: void
2231  */
2232 static inline
2233 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2234 {
2235 	spin_lock_bh(&skb_queue_head->lock);
2236 }
2237 
2238 /**
2239  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
2240  * @skb_queue_head: skb list for which the lock is to be released
2241  *
2242  * Return: void
2243  */
2244 static inline
2245 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2246 {
2247 	spin_unlock_bh(&skb_queue_head->lock);
2248 }
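
/*
 * Locking note (illustrative; txq and pending are hypothetical):
 * __qdf_nbuf_queue_head_dequeue() and __qdf_nbuf_queue_head_enqueue_tail()
 * wrap skb_dequeue()/skb_queue_tail(), which take the list lock
 * internally, so they must not be called with this lock already held.
 * The explicit lock/unlock pair is for compound read-side checks such as:
 *
 *	__qdf_nbuf_queue_head_lock(&txq);
 *	pending = __qdf_nbuf_queue_head_qlen(&txq);
 *	__qdf_nbuf_queue_head_unlock(&txq);
 */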
2249 
2250 #ifdef CONFIG_NBUF_AP_PLATFORM
2251 #include <i_qdf_nbuf_w.h>
2252 #else
2253 #include <i_qdf_nbuf_m.h>
2254 #endif
2255 #endif /* _I_QDF_NBUF_H */
2256