1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_nbuf.h
21  * This file provides OS dependent nbuf APIs.
22  */
23 
24 #ifndef _I_QDF_NBUF_H
25 #define _I_QDF_NBUF_H
26 
27 #include <linux/skbuff.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/version.h>
32 #include <asm/cacheflush.h>
33 #include <qdf_types.h>
34 #include <qdf_net_types.h>
35 #include <qdf_status.h>
36 #include <qdf_util.h>
37 #include <qdf_mem.h>
38 #include <linux/tcp.h>
39 #include <qdf_util.h>
40 
41 /*
42  * Use the socket buffer (sk_buff) as the underlying implementation of
43  * qdf_nbuf. Linux uses sk_buff to represent both packets and data,
44  * so we use sk_buff to represent both as well.
45  */
46 typedef struct sk_buff *__qdf_nbuf_t;
47 
48 /**
49  * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
50  *
51  * This is used for skb queue management via linux skb buff head APIs
52  */
53 typedef struct sk_buff_head __qdf_nbuf_queue_head_t;
54 
55 #define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
56 
57 /* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
58  * max tx fragments added by the driver
59  * The driver will always add one tx fragment (the tx descriptor)
60  */
61 #define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
62 #define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
63 #define QDF_NBUF_CB_PACKET_TYPE_ARP    2
64 #define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
65 #define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
66 #define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
67 #define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
68 
69 
70 /* mark the first packet after wow wakeup */
71 #define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000
72 
73 /*
74  * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
75  */
76 typedef union {
77 	uint64_t       u64;
78 	qdf_dma_addr_t dma_addr;
79 } qdf_paddr_t;
80 
81 /**
82  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
83  *                    - data passed between layers of the driver.
84  *
85  * Notes:
86  *   1. Hard limited to 48 bytes. Please count your bytes
87  *   2. The size of this structure has to be easily calculable and
88  *      consistent; do not use any conditional compile flags
89  *   3. Split into a common part followed by a tx/rx overlay
90  *   4. There is only one extra frag, which represents the HTC/HTT header
91  *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
92  *      of priv_cb_w, since it must be at the same offset in both the
93  *      TX and RX unions
94  *
95  * @paddr   : physical address retrieved by dma_map of nbuf->data
96  *
97  * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
98  * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
99  * @rx.dev.priv_cb_w.reserved1: reserved
100  * @rx.dev.priv_cb_w.reserved2: reserved
101  *
102  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
103  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
104  * @rx.dev.priv_cb_m.lro_ctx: LRO context
105  * @rx.dev.priv_cb_m.map_index:
106  * @rx.dev.priv_cb_m.peer_local_id: peer_local_id for RX pkt
107  *
108  * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
109  * @rx.peer_cached_buf_frm: peer cached buffer
110  * @rx.tcp_proto: L4 protocol is TCP
111  * @rx.tcp_pure_ack: A TCP ACK packet with no payload
112  * @rx.ipv6_proto: L3 protocol is IPV6
113  * @rx.ip_offset: offset to IP header
114  * @rx.tcp_offset: offset to TCP header
115  * @rx_ctx_id: Rx context id
116  * @flush_ind: flush indication
117  * @num_elements_in_list: number of elements in the nbuf list
118  *
119  * @rx.tcp_udp_chksum: L4 payload checksum
120  * @rx.tcp_win: TCP window size
121  *
122  * @rx.flow_id: 32bit flow id
123  *
124  * @rx.flag_chfrag_start: first MSDU in an AMSDU
125  * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
126  * @rx.flag_chfrag_end: last MSDU in an AMSDU
127  * @rx.packet_buff_pool: indicate packet from pre-allocated pool for Rx ring
128  * @rx.rsrvd: reserved
129  *
130  * @rx.trace: combined structure for DP and protocol trace
131  * @rx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
132  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
133  * @rx.trace.dp_trace: flag (Datapath trace)
134  * @rx.trace.packet_track: RX_DATA packet
135  * @rx.trace.rsrvd: reserved
136  *
137  * @rx.ftype: mcast2ucast, TSO, SG, MESH
138  * @rx.reserved: reserved
139  *
140  * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
141  * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
142  *
143  * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
144  *                 + (1) CE classification enablement bit
145  *                 + (2) packet type (802.3 or Ethernet type II)
146  *                 + (3) packet offset (usually length of HTC/HTT descr)
147  * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
148  * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
149  * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
150  * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
151  * @tx.dev.priv_cb_m.reserved: reserved
152  *
153  * @tx.ftype: mcast2ucast, TSO, SG, MESH
154  * @tx.vdev_id: vdev (for protocol trace)
155  * @tx.len: length of the extra tx fragment pointed to by tx.vaddr/tx.paddr
156  *
157  * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
158  * @tx.flags.bits.num: number of extra frags (0 or 1)
159  * @tx.flags.bits.flag_nbuf: flag, nbuf payload to be swapped (wordstream)
160  * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
161  * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
162  * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
163  * @tx.flags.bits.flag_ext_header: flag, extended header present
164  * @tx.flags.bits.flag_notify_comp: flag, tx completion notification required
165  * @tx.trace: combined structure for DP and protocol trace
166  * @tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
167  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
168  * @tx.trace.is_packet_priv: flag, packet is private
169  * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
170  * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
171  *                          + (MGMT_ACTION)] - 4 bits
172  * @tx.trace.dp_trace: flag (Datapath trace)
173  * @tx.trace.is_bcast: flag (Broadcast packet)
174  * @tx.trace.is_mcast: flag (Multicast packet)
175  * @tx.trace.packet_type: flag (Packet type)
176  * @tx.trace.htt2_frm: flag (high-latency path only)
177  * @tx.trace.print: enable packet logging
178  *
179  * @tx.vaddr: virtual address of the extra tx fragment
180  * @tx.paddr: physical/DMA address of the extra tx fragment
181  */
182 struct qdf_nbuf_cb {
183 	/* common */
184 	qdf_paddr_t paddr; /* of skb->data */
185 	/* valid only in one direction */
186 	union {
187 		/* Note: MAX: 40 bytes */
188 		struct {
189 			union {
190 				struct {
191 					void *ext_cb_ptr;
192 					void *fctx;
193 					uint32_t reserved1;
194 					uint32_t reserved2;
195 				} priv_cb_w;
196 				struct {
197 					uint32_t tcp_seq_num;
198 					uint32_t tcp_ack_num;
199 					unsigned char *lro_ctx;
200 					uint32_t map_index;
201 					uint32_t peer_local_id;
202 				} priv_cb_m;
203 			} dev;
204 			uint32_t lro_eligible:1,
205 				peer_cached_buf_frm:1,
206 				tcp_proto:1,
207 				tcp_pure_ack:1,
208 				ipv6_proto:1,
209 				ip_offset:7,
210 				tcp_offset:7,
211 				rx_ctx_id:4,
212 				flush_ind:1,
213 				num_elements_in_list:8;
214 			uint32_t tcp_udp_chksum:16,
215 				 tcp_win:16;
216 			uint32_t flow_id;
217 			uint8_t flag_chfrag_start:1,
218 				flag_chfrag_cont:1,
219 				flag_chfrag_end:1,
220 				packet_buff_pool:1,
221 				rsrvd:4;
222 			union {
223 				uint8_t packet_state;
224 				uint8_t dp_trace:1,
225 					packet_track:4,
226 					rsrvd:3;
227 			} trace;
228 			uint8_t ftype;
229 			uint8_t reserved;
230 		} rx;
231 
232 		/* Note: MAX: 40 bytes */
233 		struct {
234 			union {
235 				struct {
236 					void *ext_cb_ptr;
237 					void *fctx;
238 				} priv_cb_w;
239 				struct {
240 					uint32_t data_attr;
241 					struct {
242 						uint32_t owned:1,
243 							priv:31;
244 					} ipa;
245 					uint16_t desc_id;
246 					uint16_t mgmt_desc_id;
247 					uint32_t reserved;
248 				} priv_cb_m;
249 			} dev;
250 			uint8_t ftype;
251 			uint8_t vdev_id;
252 			uint16_t len;
253 			union {
254 				struct {
255 					uint8_t flag_efrag:1,
256 						flag_nbuf:1,
257 						num:1,
258 						flag_chfrag_start:1,
259 						flag_chfrag_cont:1,
260 						flag_chfrag_end:1,
261 						flag_ext_header:1,
262 						flag_notify_comp:1;
263 				} bits;
264 				uint8_t u8;
265 			} flags;
266 			struct {
267 				uint8_t packet_state:7,
268 					is_packet_priv:1;
269 				uint8_t packet_track:4,
270 					proto_type:4;
271 				uint8_t dp_trace:1,
272 					is_bcast:1,
273 					is_mcast:1,
274 					packet_type:3,
275 					/* used only for hl*/
276 					htt2_frm:1,
277 					print:1;
278 			} trace;
279 			unsigned char *vaddr;
280 			qdf_paddr_t paddr;
281 		} tx;
282 	} u;
283 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
284 
285 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
286 	(sizeof(struct qdf_nbuf_cb)) <= FIELD_SIZEOF(struct sk_buff, cb));
287 
288 /**
289  *  access macros to qdf_nbuf_cb
290  *  Note: These macros can be used as L-values as well as R-values.
291  *        When used as R-values, they effectively function as "get" macros
292  *        When used as L-values, they effectively function as "set" macros
293  */
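
/*
 * Example (illustrative sketch, not part of the API): each accessor
 * below expands to an lvalue, so the same macro works on either side
 * of an assignment:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;                  // "set"
 *	uint32_t rx_ctx = QDF_NBUF_CB_RX_CTX_ID(skb);    // "get"
 */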
294 
295 #define QDF_NBUF_CB_PADDR(skb) \
296 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
297 
298 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
299 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
300 #define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
301 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.peer_cached_buf_frm)
302 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
303 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
304 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
305 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
306 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
307 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
308 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
309 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
310 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
311 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
312 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
313 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
314 #define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
315 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flush_ind)
316 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
317 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
318 
319 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
320 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
321 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
322 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
323 
324 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
325 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
326 
327 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
328 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
329 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
330 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
331 
332 #define QDF_NBUF_CB_RX_FTYPE(skb) \
333 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
334 
335 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
336 	(((struct qdf_nbuf_cb *) \
337 	((skb)->cb))->u.rx.flag_chfrag_start)
338 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
339 	(((struct qdf_nbuf_cb *) \
340 	((skb)->cb))->u.rx.flag_chfrag_cont)
341 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
342 		(((struct qdf_nbuf_cb *) \
343 		((skb)->cb))->u.rx.flag_chfrag_end)
344 #define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
345 		(((struct qdf_nbuf_cb *) \
346 		((skb)->cb))->u.rx.packet_buff_pool)
347 
348 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
349 	qdf_nbuf_set_state(skb, PACKET_STATE)
350 
351 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
352 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
353 
354 #define QDF_NBUF_CB_TX_FTYPE(skb) \
355 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
356 
357 
358 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
359 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
360 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
361 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
362 
363 /* Tx Flags Accessor Macros */
364 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
365 	(((struct qdf_nbuf_cb *) \
366 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
367 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
368 	(((struct qdf_nbuf_cb *) \
369 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
370 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
371 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
372 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
373 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
374 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
375 	(((struct qdf_nbuf_cb *) \
376 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
377 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
378 	(((struct qdf_nbuf_cb *) \
379 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
380 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
381 		(((struct qdf_nbuf_cb *) \
382 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
383 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
384 		(((struct qdf_nbuf_cb *) \
385 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
386 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
387 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
388 /* End of Tx Flags Accessor Macros */
389 
390 /* Tx trace accessor macros */
391 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
392 	(((struct qdf_nbuf_cb *) \
393 		((skb)->cb))->u.tx.trace.packet_state)
394 
395 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
396 	(((struct qdf_nbuf_cb *) \
397 		((skb)->cb))->u.tx.trace.is_packet_priv)
398 
399 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
400 	(((struct qdf_nbuf_cb *) \
401 		((skb)->cb))->u.tx.trace.packet_track)
402 
403 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
404 		(((struct qdf_nbuf_cb *) \
405 			((skb)->cb))->u.rx.trace.packet_track)
406 
407 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
408 	(((struct qdf_nbuf_cb *) \
409 		((skb)->cb))->u.tx.trace.proto_type)
410 
411 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
412 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
413 
414 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
415 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
416 
417 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
418 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
419 
420 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
421 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
422 
423 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
424 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
425 
426 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
427 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
428 
429 #define QDF_NBUF_CB_SET_BCAST(skb) \
430 	(((struct qdf_nbuf_cb *) \
431 		((skb)->cb))->u.tx.trace.is_bcast = true)
432 
433 #define QDF_NBUF_CB_SET_MCAST(skb) \
434 	(((struct qdf_nbuf_cb *) \
435 		((skb)->cb))->u.tx.trace.is_mcast = true)
436 /* End of Tx trace accessor macros */
437 
438 
439 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
440 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
441 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
442 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
443 
444 /* assume the OS provides a single fragment */
445 #define __qdf_nbuf_get_num_frags(skb)		   \
446 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
447 
448 #define __qdf_nbuf_reset_num_frags(skb) \
449 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
450 
451 /**
452  *   end of nbuf->cb access macros
453  */
454 
455 typedef void (*qdf_nbuf_trace_update_t)(char *);
456 typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
457 
458 #define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
459 
460 #define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
461 	(QDF_NBUF_CB_PADDR(skb) = paddr)
462 
463 #define __qdf_nbuf_frag_push_head(					\
464 	skb, frag_len, frag_vaddr, frag_paddr)				\
465 	do {					\
466 		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
467 		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
468 		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
469 		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
470 	} while (0)
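
/*
 * Illustrative sketch: pushing the HTC/HTT descriptor as the single
 * extra fragment ("htt_hdr", "htt_hdr_paddr" and "htt_hdr_len" are
 * hypothetical names used only for this example):
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_hdr_len, htt_hdr, htt_hdr_paddr);
 *	// __qdf_nbuf_get_num_frags(skb) now returns 2 (efrag + skb data)
 */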
471 
472 #define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
473 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
474 	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
475 
476 #define __qdf_nbuf_get_frag_vaddr_always(skb)       \
477 			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
478 
479 #define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
480 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
481 	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
482 	 /* assume that the OS only provides a single fragment */	\
483 	 QDF_NBUF_CB_PADDR(skb))
484 
485 #define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
486 
487 #define __qdf_nbuf_get_frag_len(skb, frag_num)			\
488 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
489 	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
490 
491 #define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
492 	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
493 	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
494 	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
495 
496 #define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
497 	do {								\
498 		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
499 			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
500 		if (frag_num)						\
501 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
502 							      is_wstrm; \
503 		else					\
504 			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
505 							      is_wstrm; \
506 	} while (0)
507 
508 #define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
509 	do { \
510 		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
511 	} while (0)
512 
513 #define __qdf_nbuf_get_vdev_ctx(skb) \
514 	QDF_NBUF_CB_TX_VDEV_CTX((skb))
515 
516 #define __qdf_nbuf_set_tx_ftype(skb, type) \
517 	do { \
518 		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
519 	} while (0)
520 
521 #define __qdf_nbuf_get_tx_ftype(skb) \
522 		 QDF_NBUF_CB_TX_FTYPE((skb))
523 
524 
525 #define __qdf_nbuf_set_rx_ftype(skb, type) \
526 	do { \
527 		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
528 	} while (0)
529 
530 #define __qdf_nbuf_get_rx_ftype(skb) \
531 		 QDF_NBUF_CB_RX_FTYPE((skb))
532 
533 #define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
534 	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
535 
536 #define __qdf_nbuf_is_rx_chfrag_start(skb) \
537 	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))
538 
539 #define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
540 	do { \
541 		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
542 	} while (0)
543 
544 #define __qdf_nbuf_is_rx_chfrag_cont(skb) \
545 	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
546 
547 #define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
548 	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
549 
550 #define __qdf_nbuf_is_rx_chfrag_end(skb) \
551 	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))
552 
553 
554 #define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
555 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
556 
557 #define __qdf_nbuf_is_tx_chfrag_start(skb) \
558 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
559 
560 #define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
561 	do { \
562 		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
563 	} while (0)
564 
565 #define __qdf_nbuf_is_tx_chfrag_cont(skb) \
566 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
567 
568 #define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
569 	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
570 
571 #define __qdf_nbuf_is_tx_chfrag_end(skb) \
572 	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
573 
574 #define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
575 	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
576 
577 #define __qdf_nbuf_trace_get_proto_type(skb) \
578 	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
579 
580 #define __qdf_nbuf_data_attr_get(skb)		\
581 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
582 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
583 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
584 
585 /**
586  * __qdf_nbuf_num_frags_init() - init extra frags
587  * @skb: sk buffer
588  *
589  * Return: none
590  */
591 static inline
592 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
593 {
594 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
595 }
596 
597 /*
598  * prototypes. Implemented in qdf_nbuf.c
599  */
600 
601 /**
602  * __qdf_nbuf_alloc() - Allocate nbuf
603  * @osdev: Device handle
604  * @size: Netbuf requested size
605  * @reserve: headroom to start with
606  * @align: Align
607  * @prio: Priority
608  * @func: Function name of the call site
609  * @line: line number of the call site
610  *
611  * This allocates an nbuf, aligns it if needed, and reserves some space
612  * in the front. Since the reserve is done after alignment, an unaligned
613  * reserve value will result in an unaligned data address.
614  *
615  * Return: nbuf or %NULL if no memory
616  */
617 __qdf_nbuf_t
618 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
619 		 int prio, const char *func, uint32_t line);
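
/*
 * Usage sketch (illustrative only; the size/reserve/align/prio values
 * are placeholders chosen for this example):
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */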
620 
621 void __qdf_nbuf_free(struct sk_buff *skb);
622 QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
623 			struct sk_buff *skb, qdf_dma_dir_t dir);
624 void __qdf_nbuf_unmap(__qdf_device_t osdev,
625 			struct sk_buff *skb, qdf_dma_dir_t dir);
626 QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
627 				 struct sk_buff *skb, qdf_dma_dir_t dir);
628 void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
629 			struct sk_buff *skb, qdf_dma_dir_t dir);
630 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
631 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);
632 
633 QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
634 void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
635 void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
636 QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
637 	qdf_dma_dir_t dir, int nbytes);
638 void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
639 	qdf_dma_dir_t dir, int nbytes);
640 
641 void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
642 	qdf_dma_dir_t dir);
643 
644 QDF_STATUS __qdf_nbuf_map_nbytes_single(
645 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
646 void __qdf_nbuf_unmap_nbytes_single(
647 	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
648 void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
649 uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
650 void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
651 QDF_STATUS __qdf_nbuf_frag_map(
652 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
653 	int offset, qdf_dma_dir_t dir, int cur_frag);
654 void qdf_nbuf_classify_pkt(struct sk_buff *skb);
655 
656 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
657 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
658 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
659 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
660 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
661 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
662 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
663 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
664 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
665 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
666 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
667 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
668 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
669 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
670 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
671 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
672 bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
673 bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
674 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
675 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
676 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
677 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
678 bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
679 bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
680 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
681 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
682 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
683 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
684 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
685 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
686 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
687 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
688 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
689 enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
690 enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
691 enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
692 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
693 enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
694 uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
695 uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
696 
697 #ifdef QDF_NBUF_GLOBAL_COUNT
698 int __qdf_nbuf_count_get(void);
699 void __qdf_nbuf_count_inc(struct sk_buff *skb);
700 void __qdf_nbuf_count_dec(struct sk_buff *skb);
701 void __qdf_nbuf_mod_init(void);
702 void __qdf_nbuf_mod_exit(void);
703 
704 #else
705 
706 static inline int __qdf_nbuf_count_get(void)
707 {
708 	return 0;
709 }
710 
711 static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
712 {
713 	return;
714 }
715 
716 static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
717 {
718 	return;
719 }
720 
721 static inline void __qdf_nbuf_mod_init(void)
722 {
723 	return;
724 }
725 
726 static inline void __qdf_nbuf_mod_exit(void)
727 {
728 	return;
729 }
730 #endif
731 
732 /**
733  * __qdf_to_status() - OS to QDF status conversion
734  * @error : OS error
735  *
736  * Return: QDF status
737  */
738 static inline QDF_STATUS __qdf_to_status(signed int error)
739 {
740 	switch (error) {
741 	case 0:
742 		return QDF_STATUS_SUCCESS;
743 	case ENOMEM:
744 	case -ENOMEM:
745 		return QDF_STATUS_E_NOMEM;
746 	default:
747 		return QDF_STATUS_E_NOSUPPORT;
748 	}
749 }
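
/*
 * For example, pskb_expand_head() failing with -ENOMEM maps as:
 *
 *	__qdf_to_status(-ENOMEM);	// QDF_STATUS_E_NOMEM
 *	__qdf_to_status(-EINVAL);	// QDF_STATUS_E_NOSUPPORT (default)
 */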
750 
751 /**
752  * __qdf_nbuf_len() - return the amount of valid data in the skb
753  * @skb: Pointer to network buffer
754  *
755  * This API returns the amount of valid data in the skb. If there are
756  * extra frags, the total length is returned.
757  *
758  * Return: network buffer length
759  */
760 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
761 {
762 	int i, extra_frag_len = 0;
763 
764 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
765 	if (i > 0)
766 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
767 
768 	return extra_frag_len + skb->len;
769 }
770 
771 /**
772  * __qdf_nbuf_cat() - link two nbufs
773  * @dst: Buffer to piggyback into
774  * @src: Buffer to put
775  *
776  * Concatenate two nbufs; the new buffer (src) is piggybacked onto the
777  * older one (dst). The src skb is never freed here; it is the caller's
778  * responsibility to free it.
779  *
780  * Return: QDF_STATUS of the call
781  */
782 static inline QDF_STATUS
783 __qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
784 {
785 	QDF_STATUS error = 0;
786 
787 	qdf_assert(dst && src);
788 
789 	/*
790 	 * Since pskb_expand_head unconditionally reallocates the skb->head
791 	 * buffer, first check whether the current buffer is already large
792 	 * enough.
793 	 */
794 	if (skb_tailroom(dst) < src->len) {
795 		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
796 		if (error)
797 			return __qdf_to_status(error);
798 	}
799 
800 	memcpy(skb_tail_pointer(dst), src->data, src->len);
801 	skb_put(dst, src->len);
802 	return __qdf_to_status(error);
803 }
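
/*
 * Usage sketch (illustrative): concatenate src into dst, then free src,
 * since __qdf_nbuf_cat() never frees it:
 *
 *	QDF_STATUS status = __qdf_nbuf_cat(dst, src);
 *
 *	__qdf_nbuf_free(src);
 *	return status;
 */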
804 
805 /*
806  * nbuf manipulation routines
807  */
808 /**
809  * __qdf_nbuf_headroom() - return the amount of head space available
810  * @skb: Pointer to network buffer
811  *
812  * Return: amount of head room
813  */
814 static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
815 {
816 	return skb_headroom(skb);
817 }
818 
819 /**
820  * __qdf_nbuf_tailroom() - return the amount of tail space available
821  * @skb: Pointer to network buffer
822  *
823  * Return: amount of tail room
824  */
825 static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
826 {
827 	return skb_tailroom(skb);
828 }
829 
830 /**
831  * __qdf_nbuf_put_tail() - Puts data in the end
832  * @skb: Pointer to network buffer
833  * @size: size to be pushed
834  *
835  * Return: data pointer of this buf where new data has to be
836  *         put, or NULL if there is not enough room in this buf.
837  */
838 static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
839 {
840 	if (skb_tailroom(skb) < size) {
841 		if (unlikely(pskb_expand_head(skb, 0,
842 			size - skb_tailroom(skb), GFP_ATOMIC))) {
843 			dev_kfree_skb_any(skb);
844 			return NULL;
845 		}
846 	}
847 	return skb_put(skb, size);
848 }
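
/*
 * Usage sketch (illustrative; "trailer" is a placeholder buffer):
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 4);
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb was already freed
 *	memcpy(tail, trailer, 4);
 */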
849 
850 /**
851  * __qdf_nbuf_trim_tail() - trim data out from the end
852  * @skb: Pointer to network buffer
853  * @size: size to be popped
854  *
855  * Return: none
856  */
857 static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
858 {
859 	return skb_trim(skb, skb->len - size);
860 }
861 
862 
863 /*
864  * prototypes. Implemented in qdf_nbuf.c
865  */
866 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
867 QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
868 				qdf_nbuf_rx_cksum_t *cksum);
869 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
870 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
871 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
872 void __qdf_nbuf_ref(struct sk_buff *skb);
873 int __qdf_nbuf_shared(struct sk_buff *skb);
874 
875 /*
876  * qdf_nbuf_pool_delete() implementation - do nothing in linux
877  */
878 #define __qdf_nbuf_pool_delete(osdev)
879 
880 /**
881  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
882  * @skb: Pointer to network buffer
883  *
884  * If GFP_ATOMIC is overkill then we could check whether this is
885  * called from interrupt context and only then use GFP_ATOMIC, using
886  * GFP_KERNEL in the normal case.
887  *
888  * For example, use "in_irq() || irqs_disabled()" for the check.
889  *
890  * Return: cloned skb
891  */
892 static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
893 {
894 	struct sk_buff *skb_new = NULL;
895 
896 	skb_new = skb_clone(skb, GFP_ATOMIC);
897 	if (skb_new)
898 		__qdf_nbuf_count_inc(skb_new);
899 
900 	return skb_new;
901 }
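
/*
 * Sketch of the GFP selection suggested in the comment above
 * (illustrative only; the wrapper itself always uses GFP_ATOMIC):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ? GFP_ATOMIC
 *						    : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */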
902 
903 /**
904  * __qdf_nbuf_copy() - returns a private copy of the skb
905  * @skb: Pointer to network buffer
906  *
907  * This API returns a private copy of the skb, the skb returned is completely
908  *  modifiable by callers
909  *
910  * Return: skb or NULL
911  */
912 static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
913 {
914 	struct sk_buff *skb_new = NULL;
915 
916 	skb_new = skb_copy(skb, GFP_ATOMIC);
917 	if (skb_new)
918 		__qdf_nbuf_count_inc(skb_new);
919 
920 	return skb_new;
921 }
922 
923 #define __qdf_nbuf_reserve      skb_reserve
924 
925 /**
926  * __qdf_nbuf_reset() - reset the buffer data and pointer
927  * @buf: Network buf instance
928  * @reserve: reserve
929  * @align: align
930  *
931  * Return: none
932  */
933 static inline void
934 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
935 {
936 	int offset;
937 
938 	skb_push(skb, skb_headroom(skb));
939 	skb_put(skb, skb_tailroom(skb));
940 	memset(skb->data, 0x0, skb->len);
941 	skb_trim(skb, 0);
942 	skb_reserve(skb, NET_SKB_PAD);
943 	memset(skb->cb, 0x0, sizeof(skb->cb));
944 
945 	/*
946 	 * The default is for netbuf fragments to be interpreted
947 	 * as wordstreams rather than bytestreams.
948 	 */
949 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
950 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
951 
952 	/*
953 	 * Align & make sure that the tail & data are adjusted properly
954 	 */
955 
956 	if (align) {
957 		offset = ((unsigned long)skb->data) % align;
958 		if (offset)
959 			skb_reserve(skb, align - offset);
960 	}
961 
962 	skb_reserve(skb, reserve);
963 }
964 
965 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
966 /**
967  * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network buffer
968  *                                       in kernel
969  *
970  * Return: true if dev_scratch is supported
971  *         false if dev_scratch is not supported
972  */
973 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
974 {
975 	return true;
976 }
977 
978 /**
979  * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
980  * @skb: Pointer to network buffer
981  *
982  * Return: dev_scratch if dev_scratch supported
983  *         0 if dev_scratch not supported
984  */
985 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
986 {
987 	return skb->dev_scratch;
988 }
989 
990 /**
991  * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
992  * @skb: Pointer to network buffer
993  * @value: value to be set in dev_scratch of network buffer
994  *
995  * Return: void
996  */
997 static inline void
998 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
999 {
1000 	skb->dev_scratch = value;
1001 }
1002 #else
1003 static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
1004 {
1005 	return false;
1006 }
1007 
1008 static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
1009 {
1010 	return 0;
1011 }
1012 
1013 static inline void
1014 __qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
1015 {
1016 }
1017 #endif /* KERNEL_VERSION(4, 14, 0) */
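
/*
 * Usage sketch (illustrative; "cookie" is a placeholder value):
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	// On pre-4.14 kernels the setter is a no-op and the getter
 *	// returns 0.
 */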
1018 
1019 /**
1020  * __qdf_nbuf_head() - return the skb's head pointer
1021  * @skb: Pointer to network buffer
1022  *
1023  * Return: Pointer to head buffer
1024  */
1025 static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
1026 {
1027 	return skb->head;
1028 }
1029 
1030 /**
1031  * __qdf_nbuf_data() - return the pointer to data header in the skb
1032  * @skb: Pointer to network buffer
1033  *
1034  * Return: Pointer to skb data
1035  */
1036 static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
1037 {
1038 	return skb->data;
1039 }
1040 
1041 static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
1042 {
1043 	return (uint8_t *)&skb->data;
1044 }
1045 
1046 /**
1047  * __qdf_nbuf_get_protocol() - return the protocol value of the skb
1048  * @skb: Pointer to network buffer
1049  *
1050  * Return: skb protocol
1051  */
1052 static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
1053 {
1054 	return skb->protocol;
1055 }
1056 
1057 /**
1058  * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
1059  * @skb: Pointer to network buffer
1060  *
1061  * Return: skb ip_summed
1062  */
1063 static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
1064 {
1065 	return skb->ip_summed;
1066 }
1067 
1068 /**
1069  * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
1070  * @skb: Pointer to network buffer
1071  * @ip_summed: ip checksum
1072  *
1073  * Return: none
1074  */
1075 static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
1076 		 uint8_t ip_summed)
1077 {
1078 	skb->ip_summed = ip_summed;
1079 }
1080 
1081 /**
1082  * __qdf_nbuf_get_priority() - return the priority value of the skb
1083  * @skb: Pointer to network buffer
1084  *
1085  * Return: skb priority
1086  */
1087 static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
1088 {
1089 	return skb->priority;
1090 }
1091 
1092 /**
1093  * __qdf_nbuf_set_priority() - sets the priority value of the skb
1094  * @skb: Pointer to network buffer
1095  * @p: priority
1096  *
1097  * Return: none
1098  */
1099 static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
1100 {
1101 	skb->priority = p;
1102 }
1103 
1104 /**
1105  * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
1106  * @skb: Current skb
1107  * @next_skb: Next skb
1108  *
1109  * Return: void
1110  */
1111 static inline void
1112 __qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
1113 {
1114 	skb->next = skb_next;
1115 }
1116 
1117 /**
1118  * __qdf_nbuf_next() - return the next skb pointer of the current skb
1119  * @skb: Current skb
1120  *
1121  * Return: the next skb pointed to by the current skb
1122  */
1123 static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
1124 {
1125 	return skb->next;
1126 }
1127 
1128 /**
1129  * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
1130  * @skb: Current skb
1131  * @next_skb: Next skb
1132  *
1133  * This fn is used to link up extensions to the head skb. Does not handle
1134  * linking to the head
1135  *
1136  * Return: none
1137  */
1138 static inline void
1139 __qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
1140 {
1141 	skb->next = skb_next;
1142 }
1143 
1144 /**
1145  * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
1146  * @skb: Current skb
1147  *
1148  * Return: the next skb pointed to by the current skb
1149  */
1150 static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
1151 {
1152 	return skb->next;
1153 }
1154 
1155 /**
1156  * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
1157  * @skb_head: head_buf nbuf holding head segment (single)
1158  * @ext_list: nbuf list holding linked extensions to the head
1159  * @ext_len: Total length of all buffers in the extension list
1160  *
1161  * This function is used to link up a list of packet extensions
1162  * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
1163  *
1164  * Return: none
1165  */
1166 static inline void
1167 __qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
1168 			struct sk_buff *ext_list, size_t ext_len)
1169 {
1170 	skb_shinfo(skb_head)->frag_list = ext_list;
1171 	skb_head->data_len = ext_len;
1172 	skb_head->len += skb_head->data_len;
1173 }
1174 
1175 /**
1176  * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
1177  * @head_buf: Network buf holding head segment (single)
1178  *
1179  * This ext_list is populated when we have a jumbo packet, for example in
1180  * case of monitor mode amsdu packet reception, stitched using frag_list.
1181  *
1182  * Return: Network buf list holding linked extensions from head buf.
1183  */
1184 static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
1185 {
1186 	return (skb_shinfo(head_buf)->frag_list);
1187 }
1188 
1189 /**
1190  * __qdf_nbuf_get_age() - return the checksum value of the skb
1191  * @skb: Pointer to network buffer
1192  *
1193  * Return: checksum value
1194  */
1195 static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
1196 {
1197 	return skb->csum;
1198 }
1199 
1200 /**
1201  * __qdf_nbuf_set_age() - sets the checksum value of the skb
1202  * @skb: Pointer to network buffer
1203  * @v: Value
1204  *
1205  * Return: none
1206  */
1207 static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
1208 {
1209 	skb->csum = v;
1210 }
1211 
1212 /**
1213  * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
1214  * @skb: Pointer to network buffer
1215  * @adj: Adjustment value
1216  *
1217  * Return: none
1218  */
1219 static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
1220 {
1221 	skb->csum -= adj;
1222 }
1223 
1224 /**
1225  * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
1226  * @skb: Pointer to network buffer
1227  * @offset: Offset value
1228  * @len: Length
1229  * @to: Destination pointer
1230  *
1231  * Return: length of the copy bits for skb
1232  */
1233 static inline int32_t
1234 __qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
1235 {
1236 	return skb_copy_bits(skb, offset, to, len);
1237 }
1238 
1239 /**
1240  * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1241  * @skb: Pointer to network buffer
1242  * @len:  Packet length
1243  *
1244  * Return: none
1245  */
1246 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1247 {
1248 	if (skb->len > len) {
1249 		skb_trim(skb, len);
1250 	} else {
1251 		if (skb_tailroom(skb) < len - skb->len) {
1252 			if (unlikely(pskb_expand_head(skb, 0,
1253 				len - skb->len - skb_tailroom(skb),
1254 				GFP_ATOMIC))) {
1255 				dev_kfree_skb_any(skb);
1256 				qdf_assert(0);
1257 			}
1258 		}
1259 		skb_put(skb, (len - skb->len));
1260 	}
1261 }
1262 
1263 /**
1264  * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
1265  * @skb: Pointer to network buffer
1266  * @protocol: Protocol type
1267  *
1268  * Return: none
1269  */
1270 static inline void
1271 __qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
1272 {
1273 	skb->protocol = protocol;
1274 }
1275 
1276 #define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
1277 	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
1278 
1279 #define __qdf_nbuf_get_tx_htt2_frm(skb)	\
1280 	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
1281 
1282 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
1283 				      uint32_t *lo, uint32_t *hi);
1284 
1285 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
1286 	struct qdf_tso_info_t *tso_info);
1287 
1288 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
1289 			  struct qdf_tso_seg_elem_t *tso_seg,
1290 			  bool is_last_seg);
1291 
1292 #ifdef FEATURE_TSO
1293 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
1294 
1295 #else
1296 static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
1297 {
1298 	return 0;
1299 }
1300 
1301 #endif /* FEATURE_TSO */
1302 
1303 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
1304 {
1305 	if (skb_is_gso(skb) &&
1306 		(skb_is_gso_v6(skb) ||
1307 		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
1308 		return true;
1309 	else
1310 		return false;
1311 }
1312 
1313 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
1314 
1315 int __qdf_nbuf_get_users(struct sk_buff *skb);
1316 
1317 /**
1318  * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
1319  *			      and get hw_classify by peeking
1320  *			      into packet
1321  * @nbuf:		Network buffer (skb on Linux)
1322  * @pkt_type:		Pkt type (from enum htt_pkt_type)
1323  * @pkt_subtype:	Bit 4 of this field in HTT descriptor
1324  *			needs to be set in case of CE classification support
1325  *			Is set by this macro.
1326  * @hw_classify:	This is a flag which is set to indicate
1327  *			CE classification is enabled.
1328  *			Do not set this bit for VLAN packets
1329  *			OR for mcast / bcast frames.
1330  *
1331  * This macro parses the payload to figure out relevant Tx meta-data e.g.
1332  * whether to enable tx_classify bit in CE.
1333  *
1334  * Overrides pkt_type only if required for 802.3 frames (original ethernet)
1335  * If protocol is less than ETH_P_802_3_MIN (0x600), then
1336  * it is the length and a 802.3 frame else it is Ethernet Type II
1337  * (RFC 894).
1338  * Bit 4 in pkt_subtype is the tx_classify bit
1339  *
1340  * Return:	void
1341  */
1342 #define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
1343 				pkt_subtype, hw_classify)	\
1344 do {								\
1345 	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
1346 	uint16_t ether_type = ntohs(eh->h_proto);		\
1347 	bool is_mc_bc;						\
1348 								\
1349 	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
1350 		   is_multicast_ether_addr((uint8_t *)eh);	\
1351 								\
1352 	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
1353 		hw_classify = 1;				\
1354 		pkt_subtype = 0x01 <<				\
1355 			HTT_TX_CLASSIFY_BIT_S;			\
1356 	}							\
1357 								\
1358 	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
1359 		pkt_type = htt_pkt_type_ethernet;		\
1360 								\
1361 } while (0)
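
/*
 * Usage sketch (illustrative). "pkt_type" values come from enum
 * htt_pkt_type, defined in the HTT headers rather than here; the
 * initial value below is an assumption made for this example:
 *
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *	enum htt_pkt_type pkt_type = htt_pkt_type_ethernet;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify/pkt_subtype now reflect the parsed ethernet header
 */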
1362 
1363 /**
1364  * nbuf private buffer routines
1365  */
1366 
1367 /**
1368  * __qdf_nbuf_peek_header() - return the header's addr & m_len
1369  * @skb: Pointer to network buffer
1370  * @addr: Pointer to store header's addr
1371  * @len: Pointer to store network buffer length
1372  *
1373  * Return: none
1374  */
1375 static inline void
1376 __qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
1377 {
1378 	*addr = skb->data;
1379 	*len = skb->len;
1380 }
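
/*
 * Usage sketch (illustrative):
 *
 *	uint8_t *hdr;
 *	uint32_t hdr_len;
 *
 *	__qdf_nbuf_peek_header(skb, &hdr, &hdr_len);
 *	// hdr == skb->data and hdr_len == skb->len
 */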
1381 
1382 /**
1383  * typedef __qdf_nbuf_queue_t - network buffer queue
1384  * @head: Head pointer
1385  * @tail: Tail pointer
1386  * @qlen: Queue length
1387  */
1388 typedef struct __qdf_nbuf_qhead {
1389 	struct sk_buff *head;
1390 	struct sk_buff *tail;
1391 	unsigned int qlen;
1392 } __qdf_nbuf_queue_t;
1393 
1394 /******************Functions *************/
1395 
1396 /**
1397  * __qdf_nbuf_queue_init() - initialize the queue head
1398  * @qhead: Queue head
1399  *
1400  * Return: QDF status
1401  */
1402 static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
1403 {
1404 	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
1405 	return QDF_STATUS_SUCCESS;
1406 }
1407 
1408 /**
1409  * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
1410  * @qhead: Queue head
1411  * @skb: Pointer to network buffer
1412  *
1413  * This is a lockless version, driver must acquire locks if it
1414  * needs to synchronize
1415  *
1416  * Return: none
1417  */
1418 static inline void
1419 __qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
1420 {
1421 	skb->next = NULL;       /*Nullify the next ptr */
1422 
1423 	if (!qhead->head)
1424 		qhead->head = skb;
1425 	else
1426 		qhead->tail->next = skb;
1427 
1428 	qhead->tail = skb;
1429 	qhead->qlen++;
1430 }
1431 
1432 /**
1433  * __qdf_nbuf_queue_append() - Append src list at the end of dest list
1434  * @dest: target netbuf queue
1435  * @src:  source netbuf queue
1436  *
1437  * Return: target netbuf queue
1438  */
1439 static inline __qdf_nbuf_queue_t *
1440 __qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
1441 {
1442 	if (!dest)
1443 		return NULL;
1444 	else if (!src || !(src->head))
1445 		return dest;
1446 
1447 	if (!(dest->head))
1448 		dest->head = src->head;
1449 	else
1450 		dest->tail->next = src->head;
1451 
1452 	dest->tail = src->tail;
1453 	dest->qlen += src->qlen;
1454 	return dest;
1455 }
1456 
1457 /**
1458  * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
1459  * @qhead: Queue head
1460  * @skb: Pointer to network buffer
1461  *
1462  * This is a lockless version, driver must acquire locks if it needs to
1463  * synchronize
1464  *
1465  * Return: none
1466  */
1467 static inline void
1468 __qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
1469 {
1470 	if (!qhead->head) {
1471 		/*Empty queue Tail pointer Must be updated */
1472 		qhead->tail = skb;
1473 	}
1474 	skb->next = qhead->head;
1475 	qhead->head = skb;
1476 	qhead->qlen++;
1477 }
1478 
1479 /**
1480  * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
1481  * @qhead: Queue head
1482  *
1483  * This is a lockless version. Driver should take care of the locks
1484  *
1485  * Return: skb or NULL
1486  */
1487 static inline
1488 struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
1489 {
1490 	__qdf_nbuf_t tmp = NULL;
1491 
1492 	if (qhead->head) {
1493 		qhead->qlen--;
1494 		tmp = qhead->head;
1495 		if (qhead->head == qhead->tail) {
1496 			qhead->head = NULL;
1497 			qhead->tail = NULL;
1498 		} else {
1499 			qhead->head = tmp->next;
1500 		}
1501 		tmp->next = NULL;
1502 	}
1503 	return tmp;
1504 }
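
/*
 * Usage sketch for this lockless queue (illustrative; skb1/skb2 are
 * placeholders, and the caller provides any locking needed):
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)) != NULL)
 *		__qdf_nbuf_free(skb);
 */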
1505 
1506 /**
1507  * __qdf_nbuf_queue_free() - free a queue
1508  * @qhead: head of queue
1509  *
1510  * Return: QDF status
1511  */
1512 static inline QDF_STATUS
1513 __qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
1514 {
1515 	__qdf_nbuf_t  buf = NULL;
1516 
1517 	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
1518 		__qdf_nbuf_free(buf);
1519 	return QDF_STATUS_SUCCESS;
1520 }
1521 
1522 
1523 /**
1524  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1525  * @qhead: head of queue
1526  *
1527  * Return: NULL if the queue is empty
1528  */
1529 static inline struct sk_buff *
1530 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1531 {
1532 	return qhead->head;
1533 }
1534 
1535 /**
1536  * __qdf_nbuf_queue_len() - return the queue length
1537  * @qhead: Queue head
1538  *
1539  * Return: Queue length
1540  */
1541 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1542 {
1543 	return qhead->qlen;
1544 }
1545 
1546 /**
1547  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1548  * @skb: Pointer to network buffer
1549  *
1550  * This API returns the next skb from packet chain, remember the skb is
1551  * still in the queue
1552  *
1553  * Return: NULL if no packets are there
1554  */
1555 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1556 {
1557 	return skb->next;
1558 }
1559 
1560 /**
1561  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1562  * @qhead: Queue head
1563  *
1564  * Return: true if length is 0 else false
1565  */
1566 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1567 {
1568 	return qhead->qlen == 0;
1569 }
1570 
1571 /*
1572  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1573  * Because the queue head will most likely be put in some structure,
1574  * we don't use a pointer type as the definition.
1575  */
1582 
1583 static inline void
1584 __qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
1585 {
1586 }
1587 
1588 /**
1589  * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping
1590  *        the skb shell intact.
1591  *        In case of failure the skb is released.
1592  * @skb: sk buff
1593  * @headroom: size of headroom
1594  *
1595  * Return: skb or NULL
1596  */
1597 static inline struct sk_buff *
1598 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1599 {
1600 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1601 		dev_kfree_skb_any(skb);
1602 		skb = NULL;
1603 	}
1604 	return skb;
1605 }
1606 
1607 /**
1608  * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping
1609  *        the skb shell intact.
1610  *        In case of failure it releases the skb.
1611  * @skb: sk buff
1612  * @tailroom: size of tailroom
1613  *
1614  * Return: skb or NULL
1615  */
1616 static inline struct sk_buff *
1617 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1618 {
1619 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1620 		return skb;
1621 	/* unlikely path */
1624 	dev_kfree_skb_any(skb);
1625 	return NULL;
1626 }
1627 
1628 /**
1629  * __qdf_nbuf_linearize() - skb linearize
1630  * @skb: sk buff
1631  *
1632  * If the nbuf is non-linear then this function linearizes it,
1633  * i.e. consolidates the fragmented data into a single linear
1634  * buffer that can be safely modified without affecting other
1635  * users. If it is unable to linearize, -ENOMEM is returned;
1636  * on success 0 is returned.
1637  *
1638  * Return: 0 on Success, -ENOMEM on failure is returned.
1639  */
1640 static inline int
1641 __qdf_nbuf_linearize(struct sk_buff *skb)
1642 {
1643 	return skb_linearize(skb);
1644 }
1645 
1646 /**
1647  * __qdf_nbuf_unshare() - skb unshare
1648  * @skb: sk buff
1649  *
1650  * create a version of the specified nbuf whose contents
1651  * can be safely modified without affecting other
1652  * users. If the nbuf is a clone then this function
1653  * creates a new copy of the data. If the buffer is not
1654  * a clone the original buffer is returned.
1655  *
1656  * Return: skb or NULL
1657  */
1658 static inline struct sk_buff *
1659 __qdf_nbuf_unshare(struct sk_buff *skb)
1660 {
1661 	return skb_unshare(skb, GFP_ATOMIC);
1662 }
1663 
1664 /**
1665  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
1666  *@buf: sk buff
1667  *
1668  * Return: true/false
1669  */
1670 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1671 {
1672 	return skb_cloned(skb);
1673 }
1674 
1675 /**
1676  * __qdf_nbuf_pool_init() - init pool
1677  * @net: net handle
1678  *
1679  * Return: QDF status
1680  */
1681 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1682 {
1683 	return QDF_STATUS_SUCCESS;
1684 }
1685 
1691 /**
1692  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1693  *        release the skb.
1694  * @skb: sk buff
1695  * @headroom: size of headroom
1696  * @tailroom: size of tailroom
1697  *
1698  * Return: skb or NULL
1699  */
1700 static inline struct sk_buff *
1701 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1702 {
1703 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1704 		return skb;
1705 
1706 	dev_kfree_skb_any(skb);
1707 	return NULL;
1708 }
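
/*
 * Usage sketch (illustrative): grow both ends; on failure the skb has
 * already been freed, so only the returned pointer may be used:
 *
 *	skb = __qdf_nbuf_expand(skb, 128, 256);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */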
1709 
1710 /**
1711  * __qdf_nbuf_tx_cksum_info() - tx checksum info
1712  * (not implemented on Linux: asserts and returns false)
1713  * Return: true/false
1714  */
1715 static inline bool
1716 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1717 			 uint8_t **where)
1718 {
1719 	qdf_assert(0);
1720 	return false;
1721 }
1722 
1723 /**
1724  * __qdf_nbuf_reset_ctxt() - mem zero control block
1725  * @nbuf: buffer
1726  *
1727  * Return: none
1728  */
1729 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1730 {
1731 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1732 }
1733 
1734 /**
1735  * __qdf_nbuf_network_header() - get network header
1736  * @buf: buffer
1737  *
1738  * Return: network header pointer
1739  */
1740 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1741 {
1742 	return skb_network_header(buf);
1743 }
1744 
1745 /**
1746  * __qdf_nbuf_transport_header() - get transport header
1747  * @buf: buffer
1748  *
1749  * Return: transport header pointer
1750  */
1751 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1752 {
1753 	return skb_transport_header(buf);
1754 }
1755 
1756 /**
1757  *  __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
1758  *  passed as part of network buffer by network stack
1759  * @skb: sk buff
1760  *
1761  * Return: TCP MSS size
1762  *
1763  */
1764 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
1765 {
1766 	return skb_shinfo(skb)->gso_size;
1767 }
1768 
1769 /**
1770  * __qdf_nbuf_init() - Re-initializes the skb for re-use
1771  * @nbuf: sk buff
1772  *
1773  * Return: none
1774  */
1775 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
1776 
1777 /**
1778  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
1779  * @nbuf: sk buff
1780  *
1781  * Return: void ptr
1782  */
1783 static inline void *
1784 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
1785 {
1786 	return (void *)nbuf->cb;
1787 }
1788 
1789 /**
1790  * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
1791  * @skb: sk buff
1792  *
1793  * Return: head size
1794  */
1795 static inline size_t
1796 __qdf_nbuf_headlen(struct sk_buff *skb)
1797 {
1798 	return skb_headlen(skb);
1799 }
1800 
1801 /**
1802  * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
1803  * @skb: sk buff
1804  *
1805  * Return: number of fragments
1806  */
1807 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
1808 {
1809 	return skb_shinfo(skb)->nr_frags;
1810 }
1811 
1812 /**
1813  * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not.
1814  * @buf: sk buff
1815  *
1816  * Return: true/false
1817  */
1818 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
1819 {
1820 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
1821 }
1822 
1823 /**
1824  * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not.
1825  * @buf: sk buff
1826  *
1827  * Return: true/false
1828  */
1829 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
1830 {
1831 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
1832 }
1833 
1834 /**
1835  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
1836  * @skb: sk buff
1837  *
1838  * Return: size of l2+l3+l4 header length
1839  */
1840 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
1841 {
1842 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
1843 }
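
/*
 * Worked example: for a TCP/IPv4 frame with no options, a 14-byte
 * Ethernet header plus a 20-byte IP header gives
 * skb_transport_offset() == 34, and tcp_hdrlen() == 20, so the
 * function returns 54.
 */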
1844 
1845 /**
1846  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
1847  * @buf: sk buff
1848  *
1849  * Return:  true/false
1850  */
1851 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
1852 {
1853 	if (skb_is_nonlinear(skb))
1854 		return true;
1855 	else
1856 		return false;
1857 }
1858 
1859 /**
1860  * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the  skb
1861  * @buf: sk buff
1862  *
1863  * Return: TCP sequence number
1864  */
1865 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
1866 {
1867 	return ntohl(tcp_hdr(skb)->seq);
1868 }
1869 
1870 /**
1871  * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
1872  *@buf: sk buff
1873  *
1874  * Return: data pointer to typecast into your priv structure
1875  */
1876 static inline uint8_t *
1877 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
1878 {
1879 	return &skb->cb[8];
1880 }
1881 
1882 /**
1883  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
1884  * @buf: Pointer to nbuf
1885  *
1886  * Return: None
1887  */
1888 static inline void
1889 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
1890 {
1891 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
1892 }
1893 
1894 /**
1895  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
1896  *
1897  * @buf: sk buff
1898  * @queue_id: Queue id
1899  *
1900  * Return: void
1901  */
1902 static inline void
1903 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
1904 {
1905 	skb_record_rx_queue(skb, queue_id);
1906 }
1907 
1908 /**
1909  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
1910  *
1911  * @buf: sk buff
1912  *
1913  * Return: Queue mapping
1914  */
1915 static inline uint16_t
1916 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
1917 {
1918 	return skb->queue_mapping;
1919 }
1920 
1921 /**
1922  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
1923  *
1924  * @buf: sk buff
1925  *
1926  * Return: void
1927  */
1928 static inline void
1929 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
1930 {
1931 	__net_timestamp(skb);
1932 }
1933 
1934 /**
1935  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
1936  *
1937  * @buf: sk buff
1938  *
1939  * Return: time difference in ms
1940  */
1941 static inline uint64_t
1942 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
1943 {
1944 	return ktime_to_ms(net_timedelta(skb->tstamp));
1945 }
1946 
1947 /**
1948  * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
1949  *
1950  * @buf: sk buff
1951  *
1952  * Return: time difference in micro seconds
1953  */
1954 static inline uint64_t
1955 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
1956 {
1957 	return ktime_to_us(net_timedelta(skb->tstamp));
1958 }
1959 
1960 /**
1961  * __qdf_nbuf_orphan() - orphan a nbuf
1962  * @skb: sk buff
1963  *
1964  * If a buffer currently has an owner then we call the
1965  * owner's destructor function
1966  *
1967  * Return: void
1968  */
1969 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
1970 {
1971 	return skb_orphan(skb);
1972 }
1973 
1974 static inline struct sk_buff *
1975 __qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
1976 {
1977 	return skb_dequeue(skb_queue_head);
1978 }
1979 
1980 static inline
1981 uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
1982 {
1983 	return skb_queue_head->qlen;
1984 }
1985 
1986 static inline
1987 void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
1988 					struct sk_buff *skb)
1989 {
1990 	return skb_queue_tail(skb_queue_head, skb);
1991 }
1992 
1993 static inline
1994 void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
1995 {
1996 	return skb_queue_head_init(skb_queue_head);
1997 }
1998 
1999 static inline
2000 void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
2001 {
2002 	return skb_queue_purge(skb_queue_head);
2003 }
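
/*
 * Usage sketch for the sk_buff_head based wrappers (illustrative):
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_head_init(&q);
 *	__qdf_nbuf_queue_head_enqueue_tail(&q, skb);
 *	skb = __qdf_nbuf_queue_head_dequeue(&q);
 *	__qdf_nbuf_queue_head_purge(&q);	// frees anything still queued
 */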
2004 
2005 #ifdef CONFIG_WIN
2006 #include <i_qdf_nbuf_w.h>
2007 #else
2008 #include <i_qdf_nbuf_m.h>
2009 #endif
2010 #endif /* _I_QDF_NBUF_H */