/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet descriptor and its data,
 * so __qdf_nbuf_t maps directly onto struct sk_buff.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6


/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t       u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                    - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistent; do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr   : physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool flag
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index: map index
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: a TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_w.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.reserved: reserved
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv: flag, packet is priv
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                          + (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment (efrag)
 * @tx.paddr: physical/DMA address of the extra tx fragment (efrag)
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 reserved:8,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				tcp_proto:1,
				tcp_pure_ack:1,
				ipv6_proto:1,
				ip_offset:7,
				tcp_offset:7,
				rx_ctx_id:4,
				fcs_err:1,
				is_raw_frame:1,
				num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t reserved[3];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/**
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
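
/*
 * Illustrative example (not part of the API): the same macro serves as
 * both a "get" and a "set" depending on which side of an assignment it
 * appears on; "skb" here is a hypothetical local sk_buff pointer:
 *
 *	qdf_dma_addr_t pa = QDF_NBUF_CB_PADDR(skb);	// read ("get")
 *	QDF_NBUF_CB_PADDR(skb) = pa;			// write ("set")
 */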

#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)


#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
		(((struct qdf_nbuf_cb *) \
			((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */


#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb)		   \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/* end of nbuf->cb access macros */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head(					\
	skb, frag_len, frag_vaddr, frag_paddr)				\
	do {					\
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
	} while (0)
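
/*
 * Illustrative sketch (assumed names): a driver that prepends its HTC/HTT
 * descriptor as the single extra tx fragment would do roughly:
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	nfrags = __qdf_nbuf_get_num_frags(skb);	// now 2: descriptor + data
 *
 * "desc_len"/"desc_vaddr"/"desc_paddr" are hypothetical locals describing
 * an already DMA-mapped descriptor buffer.
 */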

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
	 /* assume that the OS only provides a single fragment */	\
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
	do {								\
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
		if (frag_num)						\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
							      is_wstrm; \
		else					\
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
							      is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
		 QDF_NBUF_CB_TX_FTYPE((skb))


#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
		 QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)  \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb)		\
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar)	\
		skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in
 * the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);

void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
	qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype  __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there is an
 * extra frag, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenates two nbufs: the data of the new buffer (src) is copied into
 * the tail of the older one (dst). It is the caller's responsibility to
 * free the src skb.
 *
 * Return: QDF_STATUS of the call
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
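
/*
 * Illustrative usage (hypothetical "dst"/"src" locals): append src's bytes
 * to dst, then release src, which __qdf_nbuf_cat() itself never frees:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */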

/*
 * nbuf manipulation routines
 */
/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	skb_trim(skb, skb->len - size);
}
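
/*
 * Illustrative sketch (assumed 16-byte payload): grow the tail, fill it,
 * then trim it back off. Note __qdf_nbuf_put_tail() frees the skb and
 * returns NULL if it cannot expand the buffer:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, 16);
 *	if (tail)
 *		memset(tail, 0, 16);
 *	__qdf_nbuf_trim_tail(skb, 16);
 */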

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, the caller could instead check whether it is
 * running in interrupt context (e.g. "in_irq() || irqs_disabled()") and
 * use GFP_KERNEL in the normal case.
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
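
/*
 * Minimal sketch of the context-sensitive allocation the comment above
 * suggests (not the implementation used here, which always passes
 * GFP_ATOMIC):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ?
 *			GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */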

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve      skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */

/**
 * __qdf_nbuf_head() - return the pointer to the skb's head
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
		 uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions
 * (seg1, seg2, ...) to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
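
/*
 * Illustrative sketch (hypothetical "seg0"/"seg1"/"seg2" buffers): chain
 * two extension segments behind a head segment and attach them:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1,
 *				   seg1->len + seg2->len);
 */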

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * case of monitor mode amsdu packet reception, and the segments are
 * stitched together using frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - return the length of the copy bits for skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: length of the copy bits for skb
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len:  Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				dev_kfree_skb_any(skb);
				qdf_assert(0);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
				      uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
	struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 *                                    payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
		(skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into packet
 * @skb:		Network buffer (skb on Linux)
 * @pkt_type:		Pkt type (from enum htt_pkt_type)
 * @pkt_subtype:	Bit 4 of this field in HTT descriptor
 *			needs to be set in case of CE classification support
 *			Is set by this macro.
 * @hw_classify:	This is a flag which is set to indicate
 *			CE classification is enabled.
 *			Do not set this bit for VLAN packets
 *			OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If protocol is less than ETH_P_802_3_MIN (0x600), then it is the length
 * and a 802.3 frame, else it is Ethernet Type II (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return:	void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
				pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 <<				\
			HTT_TX_CLASSIFY_BIT_S;			\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)
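
/*
 * Illustrative usage (hypothetical locals; the exact initial values and
 * types used by real callers may differ). pkt_type, pkt_subtype and
 * hw_classify are plain lvalues updated in place by the macro:
 *
 *	enum htt_pkt_type pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */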

/**
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef struct __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL;       /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src:  source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: the tail pointer must be updated too */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t  buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}
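
/*
 * Illustrative sketch (hypothetical "skb1"/"skb2" buffers): typical
 * lifetime of a lockless nbuf queue; the caller provides any needed
 * locking:
 *
 *	__qdf_nbuf_queue_t q;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);		// tail insert
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	skb1 = __qdf_nbuf_queue_remove(&q);	// FIFO order
 *	__qdf_nbuf_queue_free(&q);		// frees remaining buffers
 */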
1720 
1721 
1722 /**
1723  * __qdf_nbuf_queue_first() - returns the first skb in the queue
1724  * @qhead: head of queue
1725  *
1726  * Return: NULL if the queue is empty
1727  */
1728 static inline struct sk_buff *
1729 __qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
1730 {
1731 	return qhead->head;
1732 }
1733 
1734 /**
1735  * __qdf_nbuf_queue_last() - returns the last skb in the queue
1736  * @qhead: head of queue
1737  *
1738  * Return: NULL if the queue is empty
1739  */
1740 static inline struct sk_buff *
1741 __qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
1742 {
1743 	return qhead->tail;
1744 }
1745 
1746 /**
1747  * __qdf_nbuf_queue_len() - return the queue length
1748  * @qhead: Queue head
1749  *
1750  * Return: Queue length
1751  */
1752 static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
1753 {
1754 	return qhead->qlen;
1755 }
1756 
1757 /**
1758  * __qdf_nbuf_queue_next() - return the next skb from packet chain
1759  * @skb: Pointer to network buffer
1760  *
 * This API returns the next skb in the packet chain; the returned skb
 * remains in the queue.
 *
 * Return: next skb, or NULL at the end of the chain
1765  */
1766 static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
1767 {
1768 	return skb->next;
1769 }
1770 
1771 /**
1772  * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
1773  * @qhead: Queue head
1774  *
1775  * Return: true if length is 0 else false
1776  */
1777 static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
1778 {
1779 	return qhead->qlen == 0;
1780 }
1781 
1782 /*
1783  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some other
 * structure, we don't use a pointer type for the definition.
1786  */
1787 
1788 /*
1789  * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
1790  * Because the queue head will most likely put in some structure,
1791  * we don't use pointer type as the definition.
1792  */
1793 
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: sk buff
 * @flag: flag value
 *
 * No-op in the Linux implementation.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
1798 
1799 /**
1800  * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
1801  *        expands the headroom
1802  *        in the data region. In case of failure the skb is released.
1803  * @skb: sk buff
1804  * @headroom: size of headroom
1805  *
1806  * Return: skb or NULL
1807  */
1808 static inline struct sk_buff *
1809 __qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
1810 {
1811 	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1812 		dev_kfree_skb_any(skb);
1813 		skb = NULL;
1814 	}
1815 	return skb;
1816 }
1817 
1818 /**
1819  * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
1820  *        exapnds the tailroom
1821  *        in data region. In case of failure it releases the skb.
1822  * @skb: sk buff
1823  * @tailroom: size of tailroom
1824  *
1825  * Return: skb or NULL
1826  */
1827 static inline struct sk_buff *
1828 __qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
1829 {
1830 	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
1831 		return skb;
1832 	/**
1833 	 * unlikely path
1834 	 */
1835 	dev_kfree_skb_any(skb);
1836 	return NULL;
1837 }
1838 
1839 /**
1840  * __qdf_nbuf_linearize() - skb linearize
1841  * @skb: sk buff
1842  *
1843  * create a version of the specified nbuf whose contents
1844  * can be safely modified without affecting other
1845  * users.If the nbuf is non-linear then this function
1846  * linearize. if unable to linearize returns -ENOMEM on
1847  * success 0 is returned
1848  *
1849  * Return: 0 on Success, -ENOMEM on failure is returned.
1850  */
1851 static inline int
1852 __qdf_nbuf_linearize(struct sk_buff *skb)
1853 {
1854 	return skb_linearize(skb);
1855 }
1856 
1857 /**
1858  * __qdf_nbuf_unshare() - skb unshare
1859  * @skb: sk buff
1860  *
1861  * create a version of the specified nbuf whose contents
1862  * can be safely modified without affecting other
1863  * users.If the nbuf is a clone then this function
1864  * creates a new copy of the data. If the buffer is not
1865  * a clone the original buffer is returned.
1866  *
1867  * Return: skb or NULL
1868  */
1869 static inline struct sk_buff *
1870 __qdf_nbuf_unshare(struct sk_buff *skb)
1871 {
1872 	return skb_unshare(skb, GFP_ATOMIC);
1873 }
1874 
1875 /**
1876  * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
1878  *
1879  * Return: true/false
1880  */
1881 static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
1882 {
1883 	return skb_cloned(skb);
1884 }
1885 
1886 /**
1887  * __qdf_nbuf_pool_init() - init pool
1888  * @net: net handle
1889  *
1890  * Return: QDF status
1891  */
1892 static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
1893 {
1894 	return QDF_STATUS_SUCCESS;
1895 }
1896 
1897 /*
 * __qdf_nbuf_pool_delete() implementation - do nothing in linux
1899  */
1900 #define __qdf_nbuf_pool_delete(osdev)
1901 
1902 /**
1903  * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
1904  *        release the skb.
1905  * @skb: sk buff
1906  * @headroom: size of headroom
1907  * @tailroom: size of tailroom
1908  *
1909  * Return: skb or NULL
1910  */
1911 static inline struct sk_buff *
1912 __qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
1913 {
1914 	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
1915 		return skb;
1916 
1917 	dev_kfree_skb_any(skb);
1918 	return NULL;
1919 }
1920 
1921 /**
1922  * __qdf_nbuf_copy_expand() - copy and expand nbuf
1923  * @buf: Network buf instance
1924  * @headroom: Additional headroom to be added
1925  * @tailroom: Additional tailroom to be added
1926  *
1927  * Return: New nbuf that is a copy of buf, with additional head and tailroom
1928  *	or NULL if there is no memory
1929  */
1930 static inline struct sk_buff *
1931 __qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
1932 {
1933 	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
1934 }
1935 
1936 /**
 * __qdf_nbuf_get_ref_fraglist() - take a reference on each skb in the frag list
1938  * @buf: Network buf instance
1939  *
1940  * Return: void
1941  */
1942 static inline void
1943 __qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
1944 {
1945 	struct sk_buff *list;
1946 
1947 	skb_walk_frags(buf, list)
1948 		skb_get(list);
1949 }
1950 
1951 /**
1952  * __qdf_nbuf_tx_cksum_info() - tx checksum info
1953  *
1954  * Return: true/false
1955  */
1956 static inline bool
1957 __qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
1958 			 uint8_t **where)
1959 {
1960 	qdf_assert(0);
1961 	return false;
1962 }
1963 
1964 /**
1965  * __qdf_nbuf_reset_ctxt() - mem zero control block
1966  * @nbuf: buffer
1967  *
1968  * Return: none
1969  */
1970 static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
1971 {
1972 	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
1973 }
1974 
1975 /**
1976  * __qdf_nbuf_network_header() - get network header
1977  * @buf: buffer
1978  *
1979  * Return: network header pointer
1980  */
1981 static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
1982 {
1983 	return skb_network_header(buf);
1984 }
1985 
1986 /**
1987  * __qdf_nbuf_transport_header() - get transport header
1988  * @buf: buffer
1989  *
1990  * Return: transport header pointer
1991  */
1992 static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
1993 {
1994 	return skb_transport_header(buf);
1995 }
1996 
1997 /**
1998  *  __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
1999  *  passed as part of network buffer by network stack
2000  * @skb: sk buff
2001  *
2002  * Return: TCP MSS size
2003  *
2004  */
2005 static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
2006 {
2007 	return skb_shinfo(skb)->gso_size;
2008 }
2009 
2010 /**
2011  * __qdf_nbuf_init() - Re-initializes the skb for re-use
2012  * @nbuf: sk buff
2013  *
2014  * Return: none
2015  */
2016 void __qdf_nbuf_init(__qdf_nbuf_t nbuf);
2017 
2018 /*
2019  *  __qdf_nbuf_get_cb() - returns a pointer to skb->cb
2020  * @nbuf: sk buff
2021  *
2022  * Return: void ptr
2023  */
2024 static inline void *
2025 __qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
2026 {
2027 	return (void *)nbuf->cb;
2028 }
2029 
2030 /**
 * __qdf_nbuf_headlen() - return the length of the skb's linear buffer
2032  * @skb: sk buff
2033  *
2034  * Return: head size
2035  */
2036 static inline size_t
2037 __qdf_nbuf_headlen(struct sk_buff *skb)
2038 {
2039 	return skb_headlen(skb);
2040 }
2041 
2042 /**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
2044  * @skb: sk buff
2045  *
2046  * Return: number of fragments
2047  */
2048 static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
2049 {
2050 	return skb_shinfo(skb)->nr_frags;
2051 }
2052 
2053 /**
2054  * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is a IPv4 or not.
2055  * @buf: sk buff
2056  *
2057  * Return: true/false
2058  */
2059 static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
2060 {
2061 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
2062 }
2063 
2064 /**
2065  * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is a IPv6 or not.
2066  * @buf: sk buff
2067  *
2068  * Return: true/false
2069  */
2070 static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
2071 {
2072 	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
2073 }
2074 
2075 /**
2076  * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
2077  * @skb: sk buff
2078  *
2079  * Return: size of l2+l3+l4 header length
2080  */
2081 static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
2082 {
2083 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
2084 }
2085 
2086 /**
2087  * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
2088  * @buf: sk buff
2089  *
2090  * Return:  true/false
2091  */
2092 static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
2093 {
2094 	if (skb_is_nonlinear(skb))
2095 		return true;
2096 	else
2097 		return false;
2098 }
2099 
2100 /**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
2103  *
2104  * Return: TCP sequence number
2105  */
2106 static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
2107 {
2108 	return ntohl(tcp_hdr(skb)->seq);
2109 }
2110 
2111 /**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
2114  *
2115  * Return: data pointer to typecast into your priv structure
2116  */
2117 static inline uint8_t *
2118 __qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
2119 {
2120 	return &skb->cb[8];
2121 }
2122 
2123 /**
2124  * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
2125  * @buf: Pointer to nbuf
2126  *
2127  * Return: None
2128  */
2129 static inline void
2130 __qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
2131 {
2132 	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
2133 }
2134 
2135 /**
2136  * __qdf_nbuf_record_rx_queue() - set rx queue in skb
2137  *
2138  * @buf: sk buff
2139  * @queue_id: Queue id
2140  *
2141  * Return: void
2142  */
2143 static inline void
2144 __qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
2145 {
2146 	skb_record_rx_queue(skb, queue_id);
2147 }
2148 
2149 /**
2150  * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
2151  *
2152  * @buf: sk buff
2153  *
2154  * Return: Queue mapping
2155  */
2156 static inline uint16_t
2157 __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
2158 {
2159 	return skb->queue_mapping;
2160 }
2161 
2162 /**
2163  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
2164  *
2165  * @buf: sk buff
2166  *
2167  * Return: void
2168  */
2169 static inline void
2170 __qdf_nbuf_set_timestamp(struct sk_buff *skb)
2171 {
2172 	__net_timestamp(skb);
2173 }
2174 
2175 /**
2176  * __qdf_nbuf_get_timestamp() - get the timestamp for frame
2177  *
2178  * @buf: sk buff
2179  *
2180  * Return: timestamp stored in skb in ms
2181  */
2182 static inline uint64_t
2183 __qdf_nbuf_get_timestamp(struct sk_buff *skb)
2184 {
2185 	return ktime_to_ms(skb_get_ktime(skb));
2186 }
2187 
2188 /**
2189  * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
2190  *
2191  * @buf: sk buff
2192  *
2193  * Return: time difference in ms
2194  */
2195 static inline uint64_t
2196 __qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
2197 {
2198 	return ktime_to_ms(net_timedelta(skb->tstamp));
2199 }
2200 
2201 /**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
2207  */
2208 static inline uint64_t
2209 __qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
2210 {
2211 	return ktime_to_us(net_timedelta(skb->tstamp));
2212 }
2213 
2214 /**
2215  * __qdf_nbuf_orphan() - orphan a nbuf
2216  * @skb: sk buff
2217  *
2218  * If a buffer currently has an owner then we call the
2219  * owner's destructor function
2220  *
2221  * Return: void
2222  */
2223 static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
2224 {
	skb_orphan(skb);
2226 }
2227 
2228 /**
2229  * __qdf_nbuf_map_nbytes_single() - map nbytes
2230  * @osdev: os device
2231  * @buf: buffer
2232  * @dir: direction
2233  * @nbytes: number of bytes
2234  *
2235  * Return: QDF_STATUS
2236  */
2237 #ifdef A_SIMOS_DEVHOST
2238 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2239 		qdf_device_t osdev, struct sk_buff *buf,
2240 		qdf_dma_dir_t dir, int nbytes)
2241 {
	/* simulation host: use the virtual address as the DMA address */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
2245 	return QDF_STATUS_SUCCESS;
2246 }
2247 #else
2248 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
2249 		qdf_device_t osdev, struct sk_buff *buf,
2250 		qdf_dma_dir_t dir, int nbytes)
2251 {
2252 	qdf_dma_addr_t paddr;
2253 
2254 	/* assume that the OS only provides a single fragment */
2255 	QDF_NBUF_CB_PADDR(buf) = paddr =
2256 		dma_map_single(osdev->dev, buf->data,
2257 			       nbytes, __qdf_dma_dir_to_os(dir));
2258 	return dma_mapping_error(osdev->dev, paddr) ?
2259 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
2260 }
2261 #endif
2262 /**
2263  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
2264  * @osdev: os device
2265  * @buf: buffer
2266  * @dir: direction
2267  * @nbytes: number of bytes
2268  *
2269  * Return: none
2270  */
2271 #if defined(A_SIMOS_DEVHOST)
2272 static inline void
2273 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2274 			       qdf_dma_dir_t dir, int nbytes)
2275 {
2276 }
2277 
2278 #else
2279 static inline void
2280 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
2281 			       qdf_dma_dir_t dir, int nbytes)
2282 {
2283 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
2284 
	if (qdf_likely(paddr))
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
2290 }
2291 #endif
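
/*
 * Example - typical map/unmap pairing around a DMA transfer (sketch;
 * QDF_DMA_TO_DEVICE is assumed to be the relevant qdf_dma_dir_t value
 * for the transfer direction):
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */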
2292 
/**
 * __qdf_nbuf_queue_head_dequeue() - dequeue an skb from the head of the list
 * @skb_queue_head: skb list from which to dequeue
 *
 * Return: dequeued skb, or NULL if the list is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_qlen() - return the length of the skb list
 * @skb_queue_head: skb list
 *
 * Return: list length
 */
static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

/**
 * __qdf_nbuf_queue_head_enqueue_tail() - enqueue an skb at the list tail
 * @skb_queue_head: skb list
 * @skb: skb to enqueue
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

/**
 * __qdf_nbuf_queue_head_init() - initialize an skb list
 * @skb_queue_head: skb list to initialize
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_purge() - free all skbs on the list
 * @skb_queue_head: skb list to purge
 *
 * Return: none
 */
static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}
2323 
2324 /**
2325  * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which lock is to be acquired
2327  *
2328  * Return: void
2329  */
2330 static inline
2331 void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
2332 {
2333 	spin_lock_bh(&skb_queue_head->lock);
2334 }
2335 
2336 /**
2337  * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which lock is to be released
2339  *
2340  * Return: void
2341  */
2342 static inline
2343 void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
2344 {
2345 	spin_unlock_bh(&skb_queue_head->lock);
2346 }
2347 
2348 #ifdef CONFIG_NBUF_AP_PLATFORM
2349 #include <i_qdf_nbuf_w.h>
2350 #else
2351 #include <i_qdf_nbuf_m.h>
2352 #endif
#endif /* _I_QDF_NBUF_H */
2354