xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf.h (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_nbuf.h
21  * This file provides OS dependent nbuf APIs.
22  */
23 
24 #ifndef _I_QDF_NBUF_H
25 #define _I_QDF_NBUF_H
26 
27 #include <linux/skbuff.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/version.h>
32 #include <asm/cacheflush.h>
33 #include <qdf_types.h>
34 #include <qdf_net_types.h>
35 #include <qdf_status.h>
36 #include <qdf_util.h>
37 #include <qdf_mem.h>
38 #include <linux/tcp.h>
39 #include <qdf_util.h>
40 
41 /*
42  * Use the Linux socket buffer (sk_buff) as the underlying nbuf
43  * implementation. Linux uses sk_buff to represent both the packet
44  * and its data, so the nbuf maps directly onto an sk_buff.
45  */
46 typedef struct sk_buff *__qdf_nbuf_t;
47 
48 /**
49  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
50  *
51  * This is used for skb queue management via linux skb buff head APIs
52  */
53 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
54 
55 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
56 
57 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
58  * max tx fragments added by the driver
59  * The driver will always add one tx fragment (the tx descriptor)
60  */
61 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
62 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
63 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
64 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
65 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
66 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
67 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
68 
69 
70 /* mark the first packet after wow wakeup */
71 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
72 
73 /*
74  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
75  */
76 typedef union {
77 	uint64_t       u64;
78 	qdf_dma_addr_t dma_addr;
79 } qdf_paddr_t;
80 
81 /**
82  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
83  *                    - data passed between layers of the driver.
84  *
85  * Notes:
86  *   1. Hard limited to 48 bytes. Please count your bytes
87  *   2. The size of this structure has to be easily calculable and
88  *      consistent: do not use any conditional compile flags
89  *   3. Split into a common part followed by a tx/rx overlay
90  *   4. There is only one extra frag, which represents the HTC/HTT header
91  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
92  *      for the priv_cb_w since it must be at same offset for both
93  *      TX and RX union
94  *   6. "ipa.owned" bit must be first member in both TX and RX unions
95  *      for the priv_cb_m since it must be at same offset for both
96  *      TX and RX union.
97  *
98  * @paddr   : physical address retrieved by dma_map of nbuf->data
99  *
100  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
101  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
102  * @rx.dev.priv_cb_w.msdu_len: length of RX packet
103  * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
104  * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
105  * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for the received 5-tuple
106  *
107  * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
108  * @rx.dev.priv_cb_m.flush_ind: flush indication
109  * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool flag
110  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
111  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
112  * @rx.dev.priv_cb_m.lro_ctx: LRO context
113  * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
114  * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
115  * @rx.dev.priv_cb_m.dp.wifi2.map_index:
116  * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
117  *
118  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
119  * @rx.tcp_proto: L4 protocol is TCP
120  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
121  * @rx.ipv6_proto: L3 protocol is IPV6
122  * @rx.ip_offset: offset to IP header
123  * @rx.tcp_offset: offset to TCP header
124  * @rx_ctx_id: Rx context id
125  * @num_elements_in_list: number of elements in the nbuf list
126  *
127  * @rx.tcp_udp_chksum: L4 payload checksum
128  * @rx.tcp_win: TCP window size
129  *
130  * @rx.flow_id: 32bit flow id
131  *
132  * @rx.flag_chfrag_start: first MSDU in an AMSDU
133  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
134  * @rx.flag_chfrag_end: last MSDU in an AMSDU
135  * @rx.flag_retry: flag to indicate MSDU is retried
136  * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
137  * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
138  * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
139  * @rx.flag_is_frag: flag to indicate skb has frag list
140  * @rx.rsrvd: reserved
141  *
142  * @rx.trace: combined structure for DP and protocol trace
143  * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
144  *                         (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
145  * @rx.trace.dp_trace: flag (Datapath trace)
146  * @rx.trace.packet_track: RX_DATA packet
147  * @rx.trace.rsrvd: reserved
148  *
149  * @rx.vdev_id: vdev_id for RX pkt
150  * @rx.is_raw_frame: RAW frame
151  * @rx.fcs_err: FCS error
152  * @rx.tid_val: tid value
153  * @rx.reserved: reserved
154  * @rx.ftype: mcast2ucast, TSO, SG, MESH
155  *
156  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
157  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
158  *
159  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
160  *                 + (1) CE classification enablement bit
161  *                 + (2) packet type (802.3 or Ethernet type II)
162  *                 + (3) packet offset (usually length of HTC/HTT descr)
163  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
164  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
165  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
166  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
167  * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
168  * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
169  * @tx.dev.priv_cb_m.reserved: reserved
170  *
171  * @tx.ftype: mcast2ucast, TSO, SG, MESH
172  * @tx.vdev_id: vdev (for protocol trace)
173  * @tx.len: length of the efrag pointed to by tx.vaddr and tx.paddr
174  *
175  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
176  * @tx.flags.bits.num: number of extra frags (0 or 1)
177  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
178  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
179  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
180  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
181  * @tx.flags.bits.flag_ext_header: extended flags
182  * @tx.flags.bits.reserved: reserved
183  * @tx.trace: combined structure for DP and protocol trace
184  * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
185  *                         (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
186  * @tx.trace.is_packet_priv:
187  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
188  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
189  *                          + (MGMT_ACTION)] - 4 bits
190  * @tx.trace.dp_trace: flag (Datapath trace)
191  * @tx.trace.is_bcast: flag (Broadcast packet)
192  * @tx.trace.is_mcast: flag (Multicast packet)
193  * @tx.trace.packet_type: flag (Packet type)
194  * @tx.trace.htt2_frm: flag (high-latency path only)
195  * @tx.trace.print: enable packet logging
196  *
197  * @tx.vaddr: virtual address of the extra tx fragment
198  * @tx.paddr: physical/DMA address of the extra tx fragment
199  */
200 struct qdf_nbuf_cb {
201 	/* common */
202 	qdf_paddr_t paddr; /* of skb->data */
203 	/* valid only in one direction */
204 	union {
205 		/* Note: MAX: 40 bytes */
206 		struct {
207 			union {
208 				struct {
209 					void *ext_cb_ptr;
210 					void *fctx;
211 					uint16_t msdu_len;
212 					uint16_t peer_id;
213 					uint16_t protocol_tag;
214 					uint16_t flow_tag;
215 				} priv_cb_w;
216 				struct {
217 					/* ipa_owned bit is common between rx
218 					 * control block and tx control block.
219 					 * Do not change location of this bit.
220 					 */
221 					uint32_t ipa_owned:1,
222 						 peer_cached_buf_frm:1,
223 						 flush_ind:1,
224 						 packet_buf_pool:1,
225 						 reserved:12,
226 						 reserved1:16;
227 					uint32_t tcp_seq_num;
228 					uint32_t tcp_ack_num;
229 					union {
230 						struct {
231 							uint16_t msdu_len;
232 							uint16_t peer_id;
233 						} wifi3;
234 						struct {
235 							uint32_t map_index;
236 						} wifi2;
237 					} dp;
238 					unsigned char *lro_ctx;
239 				} priv_cb_m;
240 			} dev;
241 			uint32_t lro_eligible:1,
242 				tcp_proto:1,
243 				tcp_pure_ack:1,
244 				ipv6_proto:1,
245 				ip_offset:7,
246 				tcp_offset:7,
247 				rx_ctx_id:4,
248 				fcs_err:1,
249 				is_raw_frame:1,
250 				num_elements_in_list:8;
251 			uint32_t tcp_udp_chksum:16,
252 				 tcp_win:16;
253 			uint32_t flow_id;
254 			uint8_t flag_chfrag_start:1,
255 				flag_chfrag_cont:1,
256 				flag_chfrag_end:1,
257 				flag_retry:1,
258 				flag_da_mcbc:1,
259 				flag_da_valid:1,
260 				flag_sa_valid:1,
261 				flag_is_frag:1;
262 			union {
263 				uint8_t packet_state;
264 				uint8_t dp_trace:1,
265 					packet_track:4,
266 					rsrvd:3;
267 			} trace;
268 			uint16_t vdev_id:8,
269 				 tid_val:4,
270 				 ftype:4;
271 		} rx;
272 
273 		/* Note: MAX: 40 bytes */
274 		struct {
275 			union {
276 				struct {
277 					void *ext_cb_ptr;
278 					void *fctx;
279 				} priv_cb_w;
280 				struct {
281 					/* ipa_owned bit is common between rx
282 					 * control block and tx control block.
283 					 * Do not change location of this bit.
284 					 */
285 					struct {
286 						uint32_t owned:1,
287 							priv:31;
288 					} ipa;
289 					uint32_t data_attr;
290 					uint16_t desc_id;
291 					uint16_t mgmt_desc_id;
292 					struct {
293 						uint8_t bi_map:1,
294 							reserved:7;
295 					} dma_option;
296 					uint8_t reserved[3];
297 				} priv_cb_m;
298 			} dev;
299 			uint8_t ftype;
300 			uint8_t vdev_id;
301 			uint16_t len;
302 			union {
303 				struct {
304 					uint8_t flag_efrag:1,
305 						flag_nbuf:1,
306 						num:1,
307 						flag_chfrag_start:1,
308 						flag_chfrag_cont:1,
309 						flag_chfrag_end:1,
310 						flag_ext_header:1,
311 						flag_notify_comp:1;
312 				} bits;
313 				uint8_t u8;
314 			} flags;
315 			struct {
316 				uint8_t packet_state:7,
317 					is_packet_priv:1;
318 				uint8_t packet_track:4,
319 					proto_type:4;
320 				uint8_t dp_trace:1,
321 					is_bcast:1,
322 					is_mcast:1,
323 					packet_type:3,
324 					/* used only for hl */
325 					htt2_frm:1,
326 					print:1;
327 			} trace;
328 			unsigned char *vaddr;
329 			qdf_paddr_t paddr;
330 		} tx;
331 	} u;
332 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
333 
334 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
335 	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));
336 
337 /**
338  *  access macros to qdf_nbuf_cb
339  *  Note: These macros can be used as L-values as well as R-values.
340  *        When used as R-values, they effectively function as "get" macros
341  *        When used as L-values, they effectively function as "set" macros
342  */
343 
344 #define QDF_NBUF_CB_PADDR(skb) \
345 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
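
/*
 * Illustrative sketch (not part of the original header): since these
 * macros expand to plain struct member accesses, the same macro works
 * as both getter and setter:
 *
 *	qdf_dma_addr_t pa = QDF_NBUF_CB_PADDR(skb);	// R-value: "get"
 *	QDF_NBUF_CB_PADDR(skb) = pa;			// L-value: "set"
 */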
346 
347 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
348 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
349 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
350 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
351 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
352 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
353 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
354 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
355 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
356 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
357 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
358 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
359 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
360 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
361 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
362 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
363 
364 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
365 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
366 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
367 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
368 
369 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
370 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
371 
372 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
373 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
374 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
375 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
376 
377 #define QDF_NBUF_CB_RX_FTYPE(skb) \
378 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
379 
380 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
381 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
382 
383 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
384 	(((struct qdf_nbuf_cb *) \
385 	((skb)->cb))->u.rx.flag_chfrag_start)
386 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
387 	(((struct qdf_nbuf_cb *) \
388 	((skb)->cb))->u.rx.flag_chfrag_cont)
389 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
390 		(((struct qdf_nbuf_cb *) \
391 		((skb)->cb))->u.rx.flag_chfrag_end)
392 
393 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
394 	(((struct qdf_nbuf_cb *) \
395 	((skb)->cb))->u.rx.flag_da_mcbc)
396 
397 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
398 	(((struct qdf_nbuf_cb *) \
399 	((skb)->cb))->u.rx.flag_da_valid)
400 
401 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
402 	(((struct qdf_nbuf_cb *) \
403 	((skb)->cb))->u.rx.flag_sa_valid)
404 
405 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
406 	(((struct qdf_nbuf_cb *) \
407 	((skb)->cb))->u.rx.flag_retry)
408 
409 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
410 	(((struct qdf_nbuf_cb *) \
411 	((skb)->cb))->u.rx.is_raw_frame)
412 
413 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
414 	(((struct qdf_nbuf_cb *) \
415 	((skb)->cb))->u.rx.tid_val)
416 
417 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
418 	(((struct qdf_nbuf_cb *) \
419 	((skb)->cb))->u.rx.flag_is_frag)
420 
421 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
422 	(((struct qdf_nbuf_cb *) \
423 	((skb)->cb))->u.rx.fcs_err)
424 
425 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
426 	qdf_nbuf_set_state(skb, PACKET_STATE)
427 
428 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
429 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
430 
431 #define QDF_NBUF_CB_TX_FTYPE(skb) \
432 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
433 
434 
435 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
436 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
437 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
438 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
439 
440 /* Tx Flags Accessor Macros*/
441 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
442 	(((struct qdf_nbuf_cb *) \
443 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
444 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
445 	(((struct qdf_nbuf_cb *) \
446 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
447 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
448 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
449 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
450 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
451 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
452 	(((struct qdf_nbuf_cb *) \
453 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
454 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
455 	(((struct qdf_nbuf_cb *) \
456 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
457 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
458 		(((struct qdf_nbuf_cb *) \
459 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
460 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
461 		(((struct qdf_nbuf_cb *) \
462 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
463 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
464 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
465 /* End of Tx Flags Accessor Macros */
466 
467 /* Tx trace accessor macros */
468 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
469 	(((struct qdf_nbuf_cb *) \
470 		((skb)->cb))->u.tx.trace.packet_state)
471 
472 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
473 	(((struct qdf_nbuf_cb *) \
474 		((skb)->cb))->u.tx.trace.is_packet_priv)
475 
476 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
477 	(((struct qdf_nbuf_cb *) \
478 		((skb)->cb))->u.tx.trace.packet_track)
479 
480 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
481 		(((struct qdf_nbuf_cb *) \
482 			((skb)->cb))->u.rx.trace.packet_track)
483 
484 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
485 	(((struct qdf_nbuf_cb *) \
486 		((skb)->cb))->u.tx.trace.proto_type)
487 
488 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
489 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
490 
491 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
492 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
493 
494 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
495 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
496 
497 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
498 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
499 
500 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
501 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
502 
503 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
504 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
505 
506 #define QDF_NBUF_CB_SET_BCAST(skb) \
507 	(((struct qdf_nbuf_cb *) \
508 		((skb)->cb))->u.tx.trace.is_bcast = true)
509 
510 #define QDF_NBUF_CB_SET_MCAST(skb) \
511 	(((struct qdf_nbuf_cb *) \
512 		((skb)->cb))->u.tx.trace.is_mcast = true)
513 /* End of Tx trace accessor macros */
514 
515 
516 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
517 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
518 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
519 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
520 
521 /* assume the OS provides a single fragment */
522 #define __qdf_nbuf_get_num_frags(skb)		   \
523 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
524 
525 #define __qdf_nbuf_reset_num_frags(skb) \
526 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
527 
528 /**
529  *   end of nbuf->cb access macros
530  */
531 
532 typedef void (*qdf_nbuf_trace_update_t)(char *);
533 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
534 
535 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
536 
537 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
538 	(QDF_NBUF_CB_PADDR(skb) = paddr)
539 
540 #define __qdf_nbuf_frag_push_head(					\
541 	skb, frag_len, frag_vaddr, frag_paddr)				\
542 	do {					\
543 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
544 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
545 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
546 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
547 	} while (0)
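
/*
 * Usage sketch (illustrative; the htt_hdr_* values are hypothetical):
 * push the single driver-added extra fragment, e.g. an HTC/HTT header:
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_hdr_len, htt_hdr_vaddr,
 *				  htt_hdr_paddr);
 *	num_frags = __qdf_nbuf_get_num_frags(skb);	// now 2
 */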
548 
549 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
550 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
551 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
552 
553 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
554 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
555 
556 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
557 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
558 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
559 	 /* assume that the OS only provides a single fragment */	\
560 	 QDF_NBUF_CB_PADDR(skb))
561 
562 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
563 
564 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
565 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
566 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
567 
568 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
569 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
570 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
571 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
572 
573 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
574 	do {								\
575 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
576 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
577 		if (frag_num)						\
578 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
579 							      is_wstrm; \
580 		else					\
581 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
582 							      is_wstrm; \
583 	} while (0)
584 
585 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
586 	do { \
587 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
588 	} while (0)
589 
590 #define __qdf_nbuf_get_vdev_ctx(skb) \
591 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
592 
593 #define __qdf_nbuf_set_tx_ftype(skb, type) \
594 	do { \
595 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
596 	} while (0)
597 
598 #define __qdf_nbuf_get_tx_ftype(skb) \
599 		 QDF_NBUF_CB_TX_FTYPE((skb))
600 
601 
602 #define __qdf_nbuf_set_rx_ftype(skb, type) \
603 	do { \
604 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
605 	} while (0)
606 
607 #define __qdf_nbuf_get_rx_ftype(skb) \
608 		 QDF_NBUF_CB_RX_FTYPE((skb))
609 
610 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
611 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
612 
613 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
614 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
615 
616 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
617 	do { \
618 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
619 	} while (0)
620 
621 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
622 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
623 
624 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
625 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
626 
627 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
628 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
629 
630 #define __qdf_nbuf_set_da_mcbc(skb, val) \
631 	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
632 
633 #define __qdf_nbuf_is_da_mcbc(skb) \
634 	(QDF_NBUF_CB_RX_DA_MCBC((skb)))
635 
636 #define __qdf_nbuf_set_da_valid(skb, val) \
637 	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
638 
639 #define __qdf_nbuf_is_da_valid(skb) \
640 	(QDF_NBUF_CB_RX_DA_VALID((skb)))
641 
642 #define __qdf_nbuf_set_sa_valid(skb, val) \
643 	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
644 
645 #define __qdf_nbuf_is_sa_valid(skb) \
646 	(QDF_NBUF_CB_RX_SA_VALID((skb)))
647 
648 #define __qdf_nbuf_set_rx_retry_flag(skb, val) \
649 	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
650 
651 #define __qdf_nbuf_is_rx_retry_flag(skb) \
652 	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
653 
654 #define __qdf_nbuf_set_raw_frame(skb, val) \
655 	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
656 
657 #define __qdf_nbuf_is_raw_frame(skb) \
658 	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))
659 
660 #define __qdf_nbuf_get_tid_val(skb) \
661 	(QDF_NBUF_CB_RX_TID_VAL((skb)))
662 
663 #define __qdf_nbuf_set_tid_val(skb, val) \
664 	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
665 
666 #define __qdf_nbuf_set_is_frag(skb, val) \
667 	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
668 
669 #define __qdf_nbuf_is_frag(skb) \
670 	(QDF_NBUF_CB_RX_IS_FRAG((skb)))
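
/*
 * Usage sketch (illustrative): each set/is pair above reads and writes
 * the same cb bit, e.g.:
 *
 *	__qdf_nbuf_set_is_frag(skb, 1);
 *	frag = __qdf_nbuf_is_frag(skb);		// reads back 1
 */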
671 
672 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
673 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
674 
675 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
676 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
677 
678 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
679 	do { \
680 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
681 	} while (0)
682 
683 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
684 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
685 
686 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
687 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
688 
689 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
690 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
691 
692 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
693 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
694 
695 #define __qdf_nbuf_trace_get_proto_type(skb) \
696 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
697 
698 #define __qdf_nbuf_data_attr_get(skb)		\
699 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
700 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
701 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
702 
703 #define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
704 		skb_queue_walk_safe(queue, var, tvar)
705 
706 /**
707  * __qdf_nbuf_num_frags_init() - init extra frags
708  * @skb: sk buffer
709  *
710  * Return: none
711  */
712 static inline
713 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
714 {
715 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
716 }
717 
718 /*
719  * prototypes. Implemented in qdf_nbuf.c
720  */
721 
722 /**
723  * __qdf_nbuf_alloc() - Allocate nbuf
724  * @osdev: Device handle
725  * @size: Netbuf requested size
726  * @reserve: headroom to start with
727  * @align: Align
728  * @prio: Priority
729  * @func: Function name of the call site
730  * @line: line number of the call site
731  *
732  * This allocates an nbuf, aligns it if needed, and reserves some space
733  * in the front. Since the reserve is done after alignment, an unaligned
734  * reserve value will result in an unaligned data address.
735  *
736  * Return: nbuf or %NULL if no memory
737  */
738 __qdf_nbuf_t
739 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
740 		 int prio, const char *func, uint32_t line);
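
/*
 * Usage sketch (illustrative; osdev and the sizes are caller-supplied):
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */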
741 
742 void __qdf_nbuf_free(struct sk_buff *skb);
743 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
744 			struct sk_buff *skb, qdf_dma_dir_t dir);
745 void __qdf_nbuf_unmap(__qdf_device_t osdev,
746 			struct sk_buff *skb, qdf_dma_dir_t dir);
747 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
748 				 struct sk_buff *skb, qdf_dma_dir_t dir);
749 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
750 			struct sk_buff *skb, qdf_dma_dir_t dir);
751 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
752 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
753 
754 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
755 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
756 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
757 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
758 	qdf_dma_dir_t dir, int nbytes);
759 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
760 	qdf_dma_dir_t dir, int nbytes);
761 
762 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
763 	qdf_dma_dir_t dir);
764 
765 QDF_STATUS __qdf_nbuf_map_nbytes_single(
766 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
767 void __qdf_nbuf_unmap_nbytes_single(
768 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
769 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
770 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
771 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
772 QDF_STATUS __qdf_nbuf_frag_map(
773 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
774 	int offset, qdf_dma_dir_t dir, int cur_frag);
775 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
776 
777 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
778 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
779 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
780 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
781 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
782 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
783 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
784 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
785 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
786 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
787 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
788 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
789 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
790 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
791 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
792 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
793 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
794 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
795 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
796 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
797 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
798 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
799 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
800 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
801 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
802 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
803 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
804 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
805 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
806 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
807 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
808 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
809 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
810 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
811 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
812 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
813 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
814 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
815 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
816 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
817 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
818 
819 #ifdef QDF_NBUF_GLOBAL_COUNT
820 int __qdf_nbuf_count_get(void);
821 void __qdf_nbuf_count_inc(struct sk_buff *skb);
822 void __qdf_nbuf_count_dec(struct sk_buff *skb);
823 void __qdf_nbuf_mod_init(void);
824 void __qdf_nbuf_mod_exit(void);
825 
826 #else
827 
828 static inline int __qdf_nbuf_count_get(void)
829 {
830 	return 0;
831 }
832 
833 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
834 {
835 	return;
836 }
837 
838 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
839 {
840 	return;
841 }
842 
843 static inline void __qdf_nbuf_mod_init(void)
844 {
845 	return;
846 }
847 
848 static inline void __qdf_nbuf_mod_exit(void)
849 {
850 	return;
851 }
852 #endif
853 
854 /**
855  * __qdf_to_status() - OS to QDF status conversion
856  * @error: OS error
857  *
858  * Return: QDF status
859  */
860 static inline QDF_STATUS __qdf_to_status(signed int error)
861 {
862 	switch (error) {
863 	case 0:
864 		return QDF_STATUS_SUCCESS;
865 	case ENOMEM:
866 	case -ENOMEM:
867 		return QDF_STATUS_E_NOMEM;
868 	default:
869 		return QDF_STATUS_E_NOSUPPORT;
870 	}
871 }
872 
873 /**
874  * __qdf_nbuf_len() - return the amount of valid data in the skb
875  * @skb: Pointer to network buffer
876  *
877  * This API returns the amount of valid data in the skb. If there are
878  * extra frags, the total length including them is returned.
879  *
880  * Return: network buffer length
881  */
882 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
883 {
884 	int i, extra_frag_len = 0;
885 
886 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
887 	if (i > 0)
888 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
889 
890 	return extra_frag_len + skb->len;
891 }
892 
893 /**
894  * __qdf_nbuf_cat() - link two nbufs
895  * @dst: Buffer to piggyback into
896  * @src: Buffer to put
897  *
898  * Concat two nbufs: the data of the new buf (src) is appended to the
899  * older one (dst). It is the caller's responsibility to free the src
900  * skb, on success as well as on failure.
901  *
902  * Return: QDF_STATUS of the call
903  */
904 static inline QDF_STATUS
905 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
906 {
907 	QDF_STATUS error = 0;
908 
909 	qdf_assert(dst && src);
910 
911 	/*
912 	 * Since pskb_expand_head unconditionally reallocates the skb->head
913 	 * buffer, first check whether the current buffer is already large
914 	 * enough.
915 	 */
916 	if (skb_tailroom(dst) < src->len) {
917 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
918 		if (error)
919 			return __qdf_to_status(error);
920 	}
921 
922 	memcpy(skb_tail_pointer(dst), src->data, src->len);
923 	skb_put(dst, src->len);
924 	return __qdf_to_status(error);
925 }
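
/*
 * Usage sketch (illustrative): on success the src data now lives in dst,
 * but the src skb itself must still be freed by the caller:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */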
926 
927 /*
928  * nbuf manipulation routines
929  */
930 /**
931  * __qdf_nbuf_headroom() - return the amount of head space available
932  * @skb: Pointer to network buffer
933  *
934  * Return: amount of head room
935  */
936 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
937 {
938 	return skb_headroom(skb);
939 }
940 
941 /**
942  * __qdf_nbuf_tailroom() - return the amount of tail space available
943  * @skb: Pointer to network buffer
944  *
945  * Return: amount of tail room
946  */
947 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
948 {
949 	return skb_tailroom(skb);
950 }
951 
952 /**
953  * __qdf_nbuf_put_tail() - Puts data at the end
954  * @skb: Pointer to network buffer
955  * @size: size to be pushed
956  *
957  * Return: data pointer of this buf where new data has to be
958  *         put, or NULL if there is not enough room in this buf.
959  */
960 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
961 {
962 	if (skb_tailroom(skb) < size) {
963 		if (unlikely(pskb_expand_head(skb, 0,
964 			size - skb_tailroom(skb), GFP_ATOMIC))) {
965 			dev_kfree_skb_any(skb);
966 			return NULL;
967 		}
968 	}
969 	return skb_put(skb, size);
970 }
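
/*
 * Usage sketch (illustrative; trailer and trailer_len are hypothetical):
 * note that the skb has already been freed when NULL is returned:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, trailer_len);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb already released
 *	memcpy(tail, trailer, trailer_len);
 */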
971 
972 /**
973  * __qdf_nbuf_trim_tail() - trim data out from the end
974  * @skb: Pointer to network buffer
975  * @size: size to be popped
976  *
977  * Return: none
978  */
979 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
980 {
981 	return skb_trim(skb, skb->len - size);
982 }
983 
984 
985 /*
986  * prototypes. Implemented in qdf_nbuf.c
987  */
988 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
989 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
990 				qdf_nbuf_rx_cksum_t *cksum);
991 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
992 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
993 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
994 void __qdf_nbuf_ref(struct sk_buff *skb);
995 int __qdf_nbuf_shared(struct sk_buff *skb);
996 
997 /*
998  * qdf_nbuf_pool_delete() implementation - do nothing in linux
999  */
1000 #define __qdf_nbuf_pool_delete(osdev)
1001 
1002 /**
1003  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
1004  * @skb: Pointer to network buffer
1005  *
1006  * If GFP_ATOMIC is overkill, the caller could check whether this is
1007  * called from interrupt context and use GFP_ATOMIC only there; in the
1008  * normal case GFP_KERNEL would suffice
1009  *
1010  * example: use "in_irq() || irqs_disabled()"
1011  *
1012  * Return: cloned skb
1013  */
1014 static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
1015 {
1016 	struct sk_buff *skb_new = NULL;
1017 
1018 	skb_new = skb_clone(skb, GFP_ATOMIC);
1019 	if (skb_new)
1020 		__qdf_nbuf_count_inc(skb_new);
1021 
1022 	return skb_new;
1023 }
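
/*
 * Illustrative sketch of the context-aware allocation suggested above
 * (an assumption about caller code; this header itself always clones
 * with GFP_ATOMIC):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ?
 *				GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */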
1024 
1025 /**
1026  * __qdf_nbuf_copy() - returns a private copy of the skb
1027  * @skb: Pointer to network buffer
1028  *
1029  * This API returns a private copy of the skb; the returned skb is
1030  * completely modifiable by the caller
1031  *
1032  * Return: skb or NULL
1033  */
1034 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
1035 {
1036 	struct sk_buff *skb_new = NULL;
1037 
1038 	skb_new = skb_copy(skb, GFP_ATOMIC);
1039 	if (skb_new)
1040 		__qdf_nbuf_count_inc(skb_new);
1041 
1042 	return skb_new;
1043 }
1044 
1045 #define __qdf_nbuf_reserve      skb_reserve
1046 
1047 /**
1048  * __qdf_nbuf_set_data_pointer() - set buffer data pointer
1049  * @skb: Pointer to network buffer
1050  * @data: data pointer
1051  *
1052  * Return: none
1053  */
1054 static inline void
1055 __qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
1056 {
1057 	skb->data = data;
1058 }
1059 
1060 /**
1061  * __qdf_nbuf_set_len() - set buffer data length
1062  * @skb: Pointer to network buffer
1063  * @len: data length
1064  *
1065  * Return: none
1066  */
1067 static inline void
1068 __qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
1069 {
1070 	skb->len = len;
1071 }
1072 
1073 /**
1074  * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
1075  * @skb: Pointer to network buffer
1076  * @len: skb data length
1077  *
1078  * Return: none
1079  */
1080 static inline void
1081 __qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
1082 {
1083 	skb_set_tail_pointer(skb, len);
1084 }
1085 
1086 /**
1087  * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
1088  * @skb: Pointer to network buffer
1089  * @list: list to use
1090  *
1091  * This is a lockless version, driver must acquire locks if it
1092  * needs to synchronize
1093  *
1094  * Return: none
1095  */
1096 static inline void
1097 __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
1098 {
1099 	__skb_unlink(skb, list);
1100 }
1101 
1102 /**
1103  * __qdf_nbuf_reset() - reset the buffer data and pointers
1104  * @skb: Network buf instance
1105  * @reserve: headroom to reserve
1106  * @align: alignment for the data pointer
1107  *
1108  * Return: none
1109  */
1110 static inline void
1111 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
1112 {
1113 	int offset;
1114 
1115 	skb_push(skb, skb_headroom(skb));
1116 	skb_put(skb, skb_tailroom(skb));
1117 	memset(skb->data, 0x0, skb->len);
1118 	skb_trim(skb, 0);
1119 	skb_reserve(skb, NET_SKB_PAD);
1120 	memset(skb->cb, 0x0, sizeof(skb->cb));
1121 
1122 	/*
1123 	 * The default is for netbuf fragments to be interpreted
1124 	 * as wordstreams rather than bytestreams.
1125 	 */
1126 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
1127 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
1128 
1129 	/*
1130 	 * Align & make sure that the tail & data are adjusted properly
1131 	 */
1132 
1133 	if (align) {
1134 		offset = ((unsigned long)skb->data) % align;
1135 		if (offset)
1136 			skb_reserve(skb, align - offset);
1137 	}
1138 
1139 	skb_reserve(skb, reserve);
1140 }
1141 
1142 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
1143 /**
1144  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network buffer
1145  *                                       in kernel
1146  *
1147  * Return: true if dev_scratch is supported
1148  *         false if dev_scratch is not supported
1149  */
1150 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1151 {
1152 	return true;
1153 }
1154 
1155 /**
1156  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
1157  * @skb: Pointer to network buffer
1158  *
1159  * Return: dev_scratch if dev_scratch supported
1160  *         0 if dev_scratch not supported
1161  */
1162 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1163 {
1164 	return skb->dev_scratch;
1165 }
1166 
1167 /**
1168  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
1169  * @skb: Pointer to network buffer
1170  * @value: value to be set in dev_scratch of network buffer
1171  *
1172  * Return: void
1173  */
1174 static inline void
1175 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1176 {
1177 	skb->dev_scratch = value;
1178 }
1179 #else
1180 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1181 {
1182 	return false;
1183 }
1184 
1185 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1186 {
1187 	return 0;
1188 }
1189 
1190 static inline void
1191 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1192 {
1193 }
1194 #endif /* KERNEL_VERSION(4, 14, 0) */
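
/*
 * Usage sketch (illustrative; cookie is a hypothetical driver pointer):
 * stash a value in dev_scratch only on kernels that support it:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, (unsigned long)cookie);
 */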
1195 
1196 /**
1197  * __qdf_nbuf_head() - return the pointer to the skb's head
1198  * @skb: Pointer to network buffer
1199  *
1200  * Return: Pointer to head buffer
1201  */
1202 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1203 {
1204 	return skb->head;
1205 }
1206 
1207 /**
1208  * __qdf_nbuf_data() - return the pointer to data header in the skb
1209  * @skb: Pointer to network buffer
1210  *
1211  * Return: Pointer to skb data
1212  */
1213 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1214 {
1215 	return skb->data;
1216 }
1217 
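/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to the location where skb->data is stored
 */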
1218 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1219 {
1220 	return (uint8_t *)&skb->data;
1221 }
1222 
1223 /**
1224  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1225  * @skb: Pointer to network buffer
1226  *
1227  * Return: skb protocol
1228  */
1229 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1230 {
1231 	return skb->protocol;
1232 }
1233 
1234 /**
1235  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1236  * @skb: Pointer to network buffer
1237  *
1238  * Return: skb ip_summed
1239  */
1240 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1241 {
1242 	return skb->ip_summed;
1243 }
1244 
1245 /**
1246  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1247  * @skb: Pointer to network buffer
1248  * @ip_summed: ip checksum
1249  *
1250  * Return: none
1251  */
1252 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1253 		 uint8_t ip_summed)
1254 {
1255 	skb->ip_summed = ip_summed;
1256 }
1257 
1258 /**
1259  * __qdf_nbuf_get_priority() - return the priority value of the skb
1260  * @skb: Pointer to network buffer
1261  *
1262  * Return: skb priority
1263  */
1264 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1265 {
1266 	return skb->priority;
1267 }
1268 
1269 /**
1270  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1271  * @skb: Pointer to network buffer
1272  * @p: priority
1273  *
1274  * Return: none
1275  */
1276 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1277 {
1278 	skb->priority = p;
1279 }
1280 
1281 /**
1282  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1283  * @skb: Current skb
1284  * @skb_next: Next skb
1285  *
1286  * Return: void
1287  */
1288 static inline void
1289 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1290 {
1291 	skb->next = skb_next;
1292 }
1293 
1294 /**
1295  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1296  * @skb: Current skb
1297  *
1298  * Return: the next skb pointed to by the current skb
1299  */
1300 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1301 {
1302 	return skb->next;
1303 }
1304 
1305 /**
1306  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1307  * @skb: Current skb
1308  * @skb_next: Next skb
1309  *
1310  * This fn is used to link up extensions to the head skb. Does not handle
1311  * linking to the head
1312  *
1313  * Return: none
1314  */
1315 static inline void
1316 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1317 {
1318 	skb->next = skb_next;
1319 }
1320 
1321 /**
1322  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1323  * @skb: Current skb
1324  *
1325  * Return: the next skb pointed to by the current skb
1326  */
1327 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1328 {
1329 	return skb->next;
1330 }
1331 
1332 /**
1333  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1334  * @skb_head: head_buf nbuf holding head segment (single)
1335  * @ext_list: nbuf list holding linked extensions to the head
1336  * @ext_len: Total length of all buffers in the extension list
1337  *
1338  * This function is used to link up a list of packet extensions (seg1, seg2, ...)
1339  * to the nbuf holding the head segment (seg0)
1340  *
1341  * Return: none
1342  */
1343 static inline void
1344 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1345 			struct sk_buff *ext_list, size_t ext_len)
1346 {
1347 	skb_shinfo(skb_head)->frag_list = ext_list;
1348 	skb_head->data_len = ext_len;
1349 	skb_head->len += skb_head->data_len;
1350 }
1351 
1352 /**
1353  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1354  * @head_buf: Network buf holding head segment (single)
1355  *
1356  * This ext_list is populated for jumbo packets, for example in case of
1357  * monitor mode amsdu packet reception, where segments are stitched via frag_list.
1358  *
1359  * Return: Network buf list holding linked extensions from head buf.
1360  */
1361 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1362 {
1363 	return (skb_shinfo(head_buf)->frag_list);
1364 }
1365 
1366 /**
1367  * __qdf_nbuf_get_age() - return the checksum value of the skb
1368  * @skb: Pointer to network buffer
1369  *
1370  * Return: checksum value
1371  */
1372 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1373 {
1374 	return skb->csum;
1375 }
1376 
1377 /**
1378  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1379  * @skb: Pointer to network buffer
1380  * @v: Value
1381  *
1382  * Return: none
1383  */
1384 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1385 {
1386 	skb->csum = v;
1387 }
1388 
1389 /**
1390  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1391  * @skb: Pointer to network buffer
1392  * @adj: Adjustment value
1393  *
1394  * Return: none
1395  */
1396 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1397 {
1398 	skb->csum -= adj;
1399 }
1400 
1401 /**
1402  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1403  * @skb: Pointer to network buffer
1404  * @offset: Offset value
1405  * @len: Length
1406  * @to: Destination pointer
1407  *
1408  * Return: length of the copy bits for skb
1409  */
1410 static inline int32_t
1411 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1412 {
1413 	return skb_copy_bits(skb, offset, to, len);
1414 }
1415 
1416 /**
1417  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1418  * @skb: Pointer to network buffer
1419  * @len:  Packet length
1420  *
1421  * Return: none
1422  */
1423 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1424 {
1425 	if (skb->len > len) {
1426 		skb_trim(skb, len);
1427 	} else {
1428 		if (skb_tailroom(skb) < len - skb->len) {
1429 			if (unlikely(pskb_expand_head(skb, 0,
1430 				len - skb->len - skb_tailroom(skb),
1431 				GFP_ATOMIC))) {
1432 				dev_kfree_skb_any(skb);
1433 				qdf_assert(0);
1434 			}
1435 		}
1436 		skb_put(skb, (len - skb->len));
1437 	}
1438 }
1439 
1440 /**
1441  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1442  * @skb: Pointer to network buffer
1443  * @protocol: Protocol type
1444  *
1445  * Return: none
1446  */
1447 static inline void
1448 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1449 {
1450 	skb->protocol = protocol;
1451 }
1452 
1453 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1454 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1455 
1456 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1457 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1458 
1459 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1460 				      uint32_t *lo, uint32_t *hi);
1461 
1462 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1463 	struct qdf_tso_info_t *tso_info);
1464 
1465 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1466 			  struct qdf_tso_seg_elem_t *tso_seg,
1467 			  bool is_last_seg);
1468 
1469 #ifdef FEATURE_TSO
1470 /**
1471  * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
1472  *                                    payload len
1473  * @skb: buffer
1474  *
1475  * Return: size
1476  */
1477 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
1478 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1479 
1480 #else
1481 static inline
1482 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
1483 {
1484 	return 0;
1485 }
1486 
1487 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1488 {
1489 	return 0;
1490 }
1491 
1492 #endif /* FEATURE_TSO */
1493 
1494 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1495 {
1496 	if (skb_is_gso(skb) &&
1497 		(skb_is_gso_v6(skb) ||
1498 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1499 		return true;
1500 	else
1501 		return false;
1502 }
1503 
1504 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1505 
1506 int __qdf_nbuf_get_users(struct sk_buff *skb);
1507 
1508 /**
1509  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1510  *			      and get hw_classify by peeking
1511  *			      into packet
1512  * @nbuf:		Network buffer (skb on Linux)
1513  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1514  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1515  *			needs to be set in case of CE classification support
1516  *			Is set by this macro.
1517  * @hw_classify:	This is a flag which is set to indicate
1518  *			CE classification is enabled.
1519  *			Do not set this bit for VLAN packets
1520  *			OR for mcast / bcast frames.
1521  *
1522  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1523  * whether to enable tx_classify bit in CE.
1524  *
1525  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1526  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1527  * it is the length and a 802.3 frame else it is Ethernet Type II
1528  * (RFC 894).
1529  * Bit 4 in pkt_subtype is the tx_classify bit
1530  *
1531  * Return:	void
1532  */
1533 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1534 				pkt_subtype, hw_classify)	\
1535 do {								\
1536 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1537 	uint16_t ether_type = ntohs(eh->h_proto);		\
1538 	bool is_mc_bc;						\
1539 								\
1540 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1541 		   is_multicast_ether_addr((uint8_t *)eh);	\
1542 								\
1543 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1544 		hw_classify = 1;				\
1545 		pkt_subtype = 0x01 <<				\
1546 			HTT_TX_CLASSIFY_BIT_S;			\
1547 	}							\
1548 								\
1549 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1550 		pkt_type = htt_pkt_type_ethernet;		\
1551 								\
1552 } while (0)
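
/*
 * Usage sketch (illustrative; the initial values are placeholders that
 * the macro may override):
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */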
1553 
1554 /**
1555  * nbuf private buffer routines
1556  */
1557 
1558 /**
1559  * __qdf_nbuf_peek_header() - return the header's addr & len
1560  * @skb: Pointer to network buffer
1561  * @addr: Pointer to store header's addr
1562  * @len: network buffer length
1563  *
1564  * Return: none
1565  */
1566 static inline void
1567 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1568 {
1569 	*addr = skb->data;
1570 	*len = skb->len;
1571 }
1572 
1573 /**
1574  * typedef struct __qdf_nbuf_queue_t -  network buffer queue
1575  * @head: Head pointer
1576  * @tail: Tail pointer
1577  * @qlen: Queue length
1578  */
1579 typedef struct __qdf_nbuf_qhead {
1580 	struct sk_buff *head;
1581 	struct sk_buff *tail;
1582 	unsigned int qlen;
1583 } __qdf_nbuf_queue_t;
1584 
1585 /****************** Functions ******************/
1586 
1587 /**
1588  * __qdf_nbuf_queue_init() - initialize the queue head
1589  * @qhead: Queue head
1590  *
1591  * Return: QDF status
1592  */
1593 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1594 {
1595 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1596 	return QDF_STATUS_SUCCESS;
1597 }
1598 
1599 /**
1600  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1601  * @qhead: Queue head
1602  * @skb: Pointer to network buffer
1603  *
1604  * This is a lockless version, driver must acquire locks if it
1605  * needs to synchronize
1606  *
1607  * Return: none
1608  */
1609 static inline void
1610 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1611 {
1612 	skb->next = NULL;       /*Nullify the next ptr */
1613 
1614 	if (!qhead->head)
1615 		qhead->head = skb;
1616 	else
1617 		qhead->tail->next = skb;
1618 
1619 	qhead->tail = skb;
1620 	qhead->qlen++;
1621 }
1622 
1623 /**
1624  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1625  * @dest: target netbuf queue
1626  * @src:  source netbuf queue
1627  *
1628  * Return: target netbuf queue
1629  */
1630 static inline __qdf_nbuf_queue_t *
1631 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1632 {
1633 	if (!dest)
1634 		return NULL;
1635 	else if (!src || !(src->head))
1636 		return dest;
1637 
1638 	if (!(dest->head))
1639 		dest->head = src->head;
1640 	else
1641 		dest->tail->next = src->head;
1642 
1643 	dest->tail = src->tail;
1644 	dest->qlen += src->qlen;
1645 	return dest;
1646 }
1647 
1648 /**
1649  * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1650  * @qhead: Queue head
1651  * @skb: Pointer to network buffer
1652  *
1653  * This is a lockless version, driver must acquire locks if it needs to
1654  * synchronize
1655  *
1656  * Return: none
1657  */
1658 static inline void
1659 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1660 {
1661 	if (!qhead->head) {
1662 		/* Empty queue: tail pointer must be updated */
1663 		qhead->tail = skb;
1664 	}
1665 	skb->next = qhead->head;
1666 	qhead->head = skb;
1667 	qhead->qlen++;
1668 }
1669 
1670 /**
1671  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1672  * @qhead: Queue head
1673  *
1674  * This is a lockless version. Driver should take care of the locks
1675  *
1676  * Return: skb or NULL
1677  */
1678 static inline
1679 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1680 {
1681 	__qdf_nbuf_t tmp = NULL;
1682 
1683 	if (qhead->head) {
1684 		qhead->qlen--;
1685 		tmp = qhead->head;
1686 		if (qhead->head == qhead->tail) {
1687 			qhead->head = NULL;
1688 			qhead->tail = NULL;
1689 		} else {
1690 			qhead->head = tmp->next;
1691 		}
1692 		tmp->next = NULL;
1693 	}
1694 	return tmp;
1695 }
1696 
1697 /**
1698  * __qdf_nbuf_queue_free() - free a queue
1699  * @qhead: head of queue
1700  *
1701  * Return: QDF status
1702  */
1703 static inline QDF_STATUS
1704 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1705 {
1706 	__qdf_nbuf_t  buf = NULL;
1707 
1708 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1709 		__qdf_nbuf_free(buf);
1710 	return QDF_STATUS_SUCCESS;
1711 }
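
/*
 * Usage sketch (illustrative): build up and then drain a queue; locking,
 * where needed, is the caller's responsibility:
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb);
 *	__qdf_nbuf_queue_free(&q);	// removes and frees every skb
 */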
1712 
1713 
1714 /**
1715  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1716  * @qhead: head of queue
1717  *
1718  * Return: NULL if the queue is empty
1719  */
1720 static inline struct sk_buff *
1721 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1722 {
1723 	return qhead->head;
1724 }
1725 
1726 /**
1727  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1728  * @qhead: head of queue
1729  *
1730  * Return: NULL if the queue is empty
1731  */
1732 static inline struct sk_buff *
1733 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1734 {
1735 	return qhead->tail;
1736 }
1737 
1738 /**
1739  * __qdf_nbuf_queue_len() - return the queue length
1740  * @qhead: Queue head
1741  *
1742  * Return: Queue length
1743  */
1744 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1745 {
1746 	return qhead->qlen;
1747 }
1748 
1749 /**
1750  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1751  * @skb: Pointer to network buffer
1752  *
1753  * This API returns the next skb from packet chain, remember the skb is
1754  * still in the queue
1755  *
1756  * Return: NULL if no packets are there
1757  */
1758 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1759 {
1760 	return skb->next;
1761 }
1762 
1763 /**
1764  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1765  * @qhead: Queue head
1766  *
1767  * Return: true if length is 0 else false
1768  */
1769 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1770 {
1771 	return qhead->qlen == 0;
1772 }
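
/*
 * Illustrative sketch (not part of the qdf API): walk a queue without
 * dequeuing, using __qdf_nbuf_queue_first()/__qdf_nbuf_queue_next().
 * Every buffer remains in the queue for the whole traversal.
 */
static inline uint32_t
__qdf_nbuf_example_count_bytes(__qdf_nbuf_queue_t *qhead)
{
	struct sk_buff *skb;
	uint32_t bytes = 0;

	for (skb = __qdf_nbuf_queue_first(qhead); skb;
	     skb = __qdf_nbuf_queue_next(skb))
		bytes += skb->len;

	return bytes;
}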
1773 
1774 /*
1775  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some structure,
 * we don't use a pointer type for the definition.
1778  */
1779 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send complete flag
 * @skb: sk buff
 * @flag: send complete flag
 *
 * This is a no-op in the Linux implementation.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1790 
1791 /**
 * __qdf_nbuf_realloc_headroom() - expand the headroom of an skb
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Keeps the skb shell intact while expanding the headroom in the data
 * region. In case of failure the skb is released and NULL is returned.
1797  *
1798  * Return: skb or NULL
1799  */
1800 static inline struct sk_buff *
1801 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1802 {
1803 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1804 		dev_kfree_skb_any(skb);
1805 		skb = NULL;
1806 	}
1807 	return skb;
1808 }
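
/*
 * Illustrative sketch (not part of the qdf API): reserve room for a
 * hypothetical tx header. Since __qdf_nbuf_realloc_headroom() frees the
 * skb on failure, the caller must adopt the returned pointer and treat
 * NULL as "buffer already released".
 */
static inline struct sk_buff *
__qdf_nbuf_example_reserve_hdr(struct sk_buff *skb, uint32_t hdr_len)
{
	if (skb_headroom(skb) < hdr_len) {
		skb = __qdf_nbuf_realloc_headroom(skb, hdr_len);
		if (!skb)
			return NULL;	/* skb was freed internally */
	}

	/* Caller fills the hdr_len bytes now exposed at skb->data */
	skb_push(skb, hdr_len);
	return skb;
}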
1809 
1810 /**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom of an skb
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Keeps the skb shell intact while expanding the tailroom in the data
 * region. In case of failure the skb is released and NULL is returned.
1816  *
1817  * Return: skb or NULL
1818  */
1819 static inline struct sk_buff *
1820 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1821 {
1822 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1823 		return skb;
	/* unlikely path: expansion failed, release the skb */
1827 	dev_kfree_skb_any(skb);
1828 	return NULL;
1829 }
1830 
1831 /**
1832  * __qdf_nbuf_linearize() - skb linearize
1833  * @skb: sk buff
1834  *
 * If the nbuf is non-linear, this function pulls all of its fragments
 * into the linear data area so the contents can be accessed as a flat
 * buffer. If the nbuf is already linear, this is a no-op.
 *
 * Return: 0 on success, -ENOMEM if the nbuf could not be linearized
1842  */
1843 static inline int
1844 __qdf_nbuf_linearize(struct sk_buff *skb)
1845 {
1846 	return skb_linearize(skb);
1847 }
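
/*
 * Illustrative sketch (not part of the qdf API): flatten a fragmented
 * rx buffer before parsing it with flat-memory accessors. On -ENOMEM
 * the buffer is left untouched and is still owned by the caller.
 */
static inline int
__qdf_nbuf_example_flatten(struct sk_buff *skb)
{
	if (!skb_is_nonlinear(skb))
		return 0;	/* already linear; nothing to do */

	return __qdf_nbuf_linearize(skb);
}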
1848 
1849 /**
1850  * __qdf_nbuf_unshare() - skb unshare
1851  * @skb: sk buff
1852  *
 * Creates a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data; if the buffer is not a
 * clone, the original buffer is returned.
1858  *
1859  * Return: skb or NULL
1860  */
1861 static inline struct sk_buff *
1862 __qdf_nbuf_unshare(struct sk_buff *skb)
1863 {
1864 	return skb_unshare(skb, GFP_ATOMIC);
1865 }
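
/*
 * Illustrative sketch (not part of the qdf API): take private ownership
 * of the data before writing to it. If the skb was a clone, the
 * original reference is consumed and a fresh copy returned; on
 * allocation failure NULL is returned. Assumes at least one byte of
 * linear data.
 */
static inline struct sk_buff *
__qdf_nbuf_example_stamp_first_byte(struct sk_buff *skb, uint8_t marker)
{
	skb = __qdf_nbuf_unshare(skb);
	if (!skb)
		return NULL;

	/* Safe now: no other user shares this data buffer */
	skb->data[0] = marker;
	return skb;
}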
1866 
1867 /**
1868  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1870  *
1871  * Return: true/false
1872  */
1873 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1874 {
1875 	return skb_cloned(skb);
1876 }
1877 
1878 /**
1879  * __qdf_nbuf_pool_init() - init pool
1880  * @net: net handle
1881  *
1882  * Return: QDF status
1883  */
1884 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1885 {
1886 	return QDF_STATUS_SUCCESS;
1887 }
1888 
1889 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in Linux
1891  */
1892 #define __qdf_nbuf_pool_delete(osdev)
1893 
1894 /**
1895  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1896  *        release the skb.
1897  * @skb: sk buff
1898  * @headroom: size of headroom
1899  * @tailroom: size of tailroom
1900  *
1901  * Return: skb or NULL
1902  */
1903 static inline struct sk_buff *
1904 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1905 {
1906 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1907 		return skb;
1908 
1909 	dev_kfree_skb_any(skb);
1910 	return NULL;
1911 }
1912 
1913 /**
1914  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1915  * @buf: Network buf instance
1916  * @headroom: Additional headroom to be added
1917  * @tailroom: Additional tailroom to be added
1918  *
1919  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1920  *	or NULL if there is no memory
1921  */
1922 static inline struct sk_buff *
1923 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1924 {
1925 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1926 }
1927 
1928 /**
 * __qdf_nbuf_tx_cksum_info() - get tx checksum info
 * @skb: sk buff
 * @hdr_off: pointer to the checksum header offset
 * @where: pointer to the checksum location
 *
 * Not supported in this implementation; asserts if called.
 *
 * Return: true/false
1932  */
1933 static inline bool
1934 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1935 			 uint8_t **where)
1936 {
1937 	qdf_assert(0);
1938 	return false;
1939 }
1940 
1941 /**
1942  * __qdf_nbuf_reset_ctxt() - mem zero control block
1943  * @nbuf: buffer
1944  *
1945  * Return: none
1946  */
1947 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1948 {
1949 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1950 }
1951 
1952 /**
1953  * __qdf_nbuf_network_header() - get network header
1954  * @buf: buffer
1955  *
1956  * Return: network header pointer
1957  */
1958 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1959 {
1960 	return skb_network_header(buf);
1961 }
1962 
1963 /**
1964  * __qdf_nbuf_transport_header() - get transport header
1965  * @buf: buffer
1966  *
1967  * Return: transport header pointer
1968  */
1969 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1970 {
1971 	return skb_transport_header(buf);
1972 }
1973 
1974 /**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 *  passed as part of the network buffer by the network stack
1977  * @skb: sk buff
1978  *
1979  * Return: TCP MSS size
1980  *
1981  */
1982 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1983 {
1984 	return skb_shinfo(skb)->gso_size;
1985 }
1986 
1987 /**
1988  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1989  * @nbuf: sk buff
1990  *
1991  * Return: none
1992  */
1993 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1994 
/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1997  * @nbuf: sk buff
1998  *
1999  * Return: void ptr
2000  */
2001 static inline void *
2002 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2003 {
2004 	return (void *)nbuf->cb;
2005 }
2006 
2007 /**
2008  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
2009  * @skb: sk buff
2010  *
2011  * Return: head size
2012  */
2013 static inline size_t
2014 __qdf_nbuf_headlen(struct sk_buff *skb)
2015 {
2016 	return skb_headlen(skb);
2017 }
2018 
2019 /**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
2021  * @skb: sk buff
2022  *
2023  * Return: number of fragments
2024  */
2025 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
2026 {
2027 	return skb_shinfo(skb)->nr_frags;
2028 }
2029 
2030 /**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
2033  *
2034  * Return: true/false
2035  */
2036 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2037 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4;
2039 }
2040 
2041 /**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
2044  *
2045  * Return: true/false
2046  */
2047 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2048 {
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6;
2050 }
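
/*
 * Illustrative sketch (not part of the qdf API): derive the IP version
 * of a TSO buffer from the GSO type recorded by the stack, using the
 * two predicates above.
 */
static inline uint8_t
__qdf_nbuf_example_tso_ip_ver(struct sk_buff *skb)
{
	if (__qdf_nbuf_tso_tcp_v4(skb))
		return 4;
	if (__qdf_nbuf_tso_tcp_v6(skb))
		return 6;

	return 0;	/* not a TCP TSO buffer */
}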
2051 
2052 /**
2053  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2054  * @skb: sk buff
2055  *
2056  * Return: size of l2+l3+l4 header length
2057  */
2058 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2059 {
2060 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2061 }
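
/*
 * Illustrative sketch (not part of the qdf API): estimate how many
 * MSS-sized segments a flat TSO buffer will be cut into, by subtracting
 * the L2+L3+L4 header length from the total frame length.
 */
static inline uint32_t
__qdf_nbuf_example_tso_seg_count(struct sk_buff *skb)
{
	size_t hdr_len = __qdf_nbuf_l2l3l4_hdr_len(skb);
	size_t mss = __qdf_nbuf_tcp_tso_size(skb);

	if (!mss || skb->len <= hdr_len)
		return 1;	/* not TSO, or nothing beyond the headers */

	return DIV_ROUND_UP(skb->len - hdr_len, mss);
}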
2062 
2063 /**
2064  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
2066  *
2067  * Return:  true/false
2068  */
2069 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2070 {
	return skb_is_nonlinear(skb);
2075 }
2076 
2077 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2080  *
2081  * Return: TCP sequence number
2082  */
2083 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2084 {
2085 	return ntohl(tcp_hdr(skb)->seq);
2086 }
2087 
2088 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 *                             private space
 * @skb: sk buff
2091  *
2092  * Return: data pointer to typecast into your priv structure
2093  */
2094 static inline uint8_t *
2095 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2096 {
2097 	return &skb->cb[8];
2098 }
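
/*
 * Illustrative sketch (not part of the qdf API): overlay a
 * caller-defined context on the private area returned by
 * __qdf_nbuf_get_priv_ptr(). The struct is hypothetical and must fit
 * in the skb->cb space remaining past the 8 reserved bytes.
 */
struct __qdf_nbuf_example_priv {
	uint16_t peer_id;
	uint8_t tid;
};

static inline void
__qdf_nbuf_example_tag_priv(struct sk_buff *skb, uint16_t peer_id,
			    uint8_t tid)
{
	struct __qdf_nbuf_example_priv *priv =
		(struct __qdf_nbuf_example_priv *)__qdf_nbuf_get_priv_ptr(skb);

	priv->peer_id = peer_id;
	priv->tid = tid;
}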
2099 
2100 /**
2101  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2102  * @buf: Pointer to nbuf
2103  *
2104  * Return: None
2105  */
2106 static inline void
2107 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2108 {
2109 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2110 }
2111 
2112 /**
2113  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
2116  * @queue_id: Queue id
2117  *
2118  * Return: void
2119  */
2120 static inline void
2121 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2122 {
2123 	skb_record_rx_queue(skb, queue_id);
2124 }
2125 
2126 /**
2127  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
2130  *
2131  * Return: Queue mapping
2132  */
2133 static inline uint16_t
2134 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2135 {
2136 	return skb->queue_mapping;
2137 }
2138 
2139 /**
2140  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
2143  *
2144  * Return: void
2145  */
2146 static inline void
2147 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2148 {
2149 	__net_timestamp(skb);
2150 }
2151 
2152 /**
2153  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
2156  *
2157  * Return: timestamp stored in skb in ms
2158  */
2159 static inline uint64_t
2160 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2161 {
2162 	return ktime_to_ms(skb_get_ktime(skb));
2163 }
2164 
2165 /**
2166  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
2169  *
2170  * Return: time difference in ms
2171  */
2172 static inline uint64_t
2173 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2174 {
2175 	return ktime_to_ms(net_timedelta(skb->tstamp));
2176 }
2177 
2178 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2184  */
2185 static inline uint64_t
2186 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2187 {
2188 	return ktime_to_us(net_timedelta(skb->tstamp));
2189 }
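
/*
 * Illustrative sketch (not part of the qdf API): measure how long a
 * buffer dwelt in the host. The ingress path is assumed to have called
 * __qdf_nbuf_set_timestamp(); the egress path reads the delta just
 * before handing the buffer off.
 */
static inline uint64_t
__qdf_nbuf_example_dwell_us(struct sk_buff *skb)
{
	if (!ktime_to_ns(skb_get_ktime(skb)))
		return 0;	/* never stamped on ingress */

	return __qdf_nbuf_get_timedelta_us(skb);
}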
2190 
2191 /**
2192  * __qdf_nbuf_orphan() - orphan a nbuf
2193  * @skb: sk buff
2194  *
2195  * If a buffer currently has an owner then we call the
2196  * owner's destructor function
2197  *
2198  * Return: void
2199  */
2200 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2201 {
	skb_orphan(skb);
2203 }
2204 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the
 *        skb list
 * @skb_queue_head: skb list from which the skb is dequeued
 *
 * Return: skb or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}
2210 
/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: length of the skb list
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}
2216 
/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the tail of
 *        the skb list
 * @skb_queue_head: skb list into which the skb is enqueued
 * @skb: skb to be enqueued
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}
2223 
/**
 * __qdf_nbuf_queue_head_init() - initialize an skb list
 * @skb_queue_head: skb list to be initialized
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}
2229 
/**
 * __qdf_nbuf_queue_head_purge() - purge an skb list, freeing every
 *        buffer on it
 * @skb_queue_head: skb list to be purged
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
2235 
2236 /**
2237  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2239  *
2240  * Return: void
2241  */
2242 static inline
2243 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2244 {
2245 	spin_lock_bh(&skb_queue_head->lock);
2246 }
2247 
2248 /**
2249  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2251  *
2252  * Return: void
2253  */
2254 static inline
2255 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2256 {
2257 	spin_unlock_bh(&skb_queue_head->lock);
2258 }
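
/*
 * Illustrative sketch (not part of the qdf API): move one buffer
 * between two sk_buff_head based queues. The dequeue and enqueue
 * wrappers above take each list's internal lock, so no extra locking
 * is needed here. Both lists are assumed to have been set up with
 * __qdf_nbuf_queue_head_init().
 */
static inline void
__qdf_nbuf_example_move_one(struct sk_buff_head *src,
			    struct sk_buff_head *dst)
{
	struct sk_buff *skb = __qdf_nbuf_queue_head_dequeue(src);

	if (skb)
		__qdf_nbuf_queue_head_enqueue_tail(dst, skb);
}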
2259 
2260 #ifdef CONFIG_NBUF_AP_PLATFORM
2261 #include <i_qdf_nbuf_w.h>
2262 #else
2263 #include <i_qdf_nbuf_m.h>
2264 #endif
#endif /* _I_QDF_NBUF_H */
2266