xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_m.h (revision 06f9ae280111c1da9db0809f457076854959e02d)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_nbuf_m.h
22  *
23  * This file provides platform specific nbuf API's.
24  * Included by i_qdf_nbuf.h and should not be included
25  * directly from other files.
26  */
27 
28 #ifndef _I_QDF_NBUF_M_H
29 #define _I_QDF_NBUF_M_H
30 /**
31  * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
32  *                    - data passed between layers of the driver.
33  *
34  * Notes:
35  *   1. Hard limited to 48 bytes. Please count your bytes
36  *   2. The size of this structure has to be easily calculable and
37  *      consistently so: do not use any conditional compile flags
38  *   3. Split into a common part followed by a tx/rx overlay
39  *   4. There is only one extra frag, which represents the HTC/HTT header
40  *   5. "ext_cb_pt" must be the first member in both TX and RX unions
41  *      for the priv_cb_w since it must be at same offset for both
42  *      TX and RX union
43  *   6. "ipa.owned" bit must be first member in both TX and RX unions
44  *      for the priv_cb_m since it must be at same offset for both
45  *      TX and RX union.
46  *
47  * @paddr   : physical address retrieved by dma_map of nbuf->data
48  * @u: union of rx and tx data
49  * @u.rx: rx data
50  * @u.rx.dev: union of priv_cb_w and priv_cb_m
51  *
52  * @u.rx.dev.priv_cb_w:
53  * @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
54  * @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
55  * @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
56  * @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
57  * @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
58  * @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
59  * @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
60  *                                   type
61  * @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
62  *
63  * @u.rx.dev.priv_cb_m:
64  * @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
65  * @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
66  * @u.rx.dev.priv_cb_m.flush_ind: flush indication
67  * @u.rx.dev.priv_cb_m.packet_buf_pool:  packet buffer pool flag
68  * @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
69  * @u.rx.dev.priv_cb_m.exc_frm: exception frame
70  * @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
71  * @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
72  *					     sw exception bit from ring desc
73  * @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
74  * @u.rx.dev.priv_cb_m.fr_ds: from DS bit in RX packet
75  * @u.rx.dev.priv_cb_m.to_ds: to DS bit in RX packet
76  * @u.rx.dev.priv_cb_m.logical_link_id: link id of RX packet
 * @u.rx.dev.priv_cb_m.band: band info of RX packet
77  * @u.rx.dev.priv_cb_m.reserved1: reserved bits
78  * @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
79  * @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
80  * @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
81  * @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
82  * @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
83  * @u.rx.dev.priv_cb_m.dp.wifi3.peer_id:  peer_id for RX packet
84  * @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
85  * @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
86  * @u.rx.dev.priv_cb_m.lro_ctx: LRO context
87  *
88  * @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
89  * @u.rx.tcp_proto: L4 protocol is TCP
90  * @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
91  * @u.rx.ipv6_proto: L3 protocol is IPV6
92  * @u.rx.ip_offset: offset to IP header
93  * @u.rx.tcp_offset: offset to TCP header
94  * @u.rx.rx_ctx_id: Rx context id
95  * @u.rx.fcs_err: FCS error
96  * @u.rx.is_raw_frame: RAW frame
97  * @u.rx.num_elements_in_list: number of elements in the nbuf list
98  *
99  * @u.rx.tcp_udp_chksum: L4 payload checksum
100  * @u.rx.tcp_win: TCP window size
101  *
102  * @u.rx.flow_id: 32bit flow id
103  *
104  * @u.rx.flag_chfrag_start: first MSDU in an AMSDU
105  * @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
106  * @u.rx.flag_chfrag_end: last MSDU in an AMSDU
107  * @u.rx.flag_retry: flag to indicate MSDU is retried
108  * @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
109  * @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
110  * @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
111  * @u.rx.flag_is_frag: flag to indicate skb has frag list
112  *
113  * @u.rx.trace: combined structure for DP and protocol trace
114  * @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
115  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
116  * @u.rx.trace.dp_trace: flag (Datapath trace)
117  * @u.rx.trace.packet_track: RX_DATA packet
118  * @u.rx.trace.rsrvd: reserved bits
119  *
120  * @u.rx.vdev_id: vdev_id for RX pkt
121  * @u.rx.tid_val: tid value
122  * @u.rx.ftype: mcast2ucast, TSO, SG, MESH
123  *
124  * @u.tx: tx data
125  * @u.tx.dev: union of priv_cb_w and priv_cb_m
126  *
127  * @u.tx.dev.priv_cb_w:
128  * @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
129  * @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
130  *
131  * @u.tx.dev.priv_cb_m:
132  * @u.tx.dev.priv_cb_m.ipa: IPA-specific data
133  * @u.tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
134  * @u.tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
135  * @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
136  *                 + (1) CE classification enablement bit
137  *                 + (2) packet type (802.3 or Ethernet type II)
138  *                 + (3) packet offset (usually length of HTC/HTT descr)
139  * @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
140  * @u.tx.dev.priv_cb_m.dma_option: DMA options
141  * @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
142  *                                              completion cb
143  * @u.tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction
144  *                                        dma map
145  * @u.tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future
146  *                                          use
147  * @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
 * @u.tx.dev.priv_cb_m.band: band info of TX packet
148  * @u.tx.dev.priv_cb_m.rsvd: reserved
149  * @u.tx.dev.priv_cb_m.reserved: reserved
150  *
151  * @u.tx.ftype: mcast2ucast, TSO, SG, MESH
152  * @u.tx.vdev_id: vdev (for protocol trace)
153  * @u.tx.len: length of efrag pointed by the above pointers
154  *
155  * @u.tx.flags: union of flag representations
156  * @u.tx.flags.bits: flags represent as individual bitmasks
157  * @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
158  * @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
159  * @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
160  * @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
161  * @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
162  * @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
163  * @u.tx.flags.bits.flag_ext_header: extended flags
164  * @u.tx.flags.bits.is_critical: flag indicating a critical frame
165  * @u.tx.flags.u8: flags as a single u8
166  * @u.tx.trace: combined structure for DP and protocol trace
167  * @u.tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
168  *                       +          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
169  * @u.tx.trace.is_packet_priv:
170  * @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
171  * @u.tx.trace.to_fw: Flag to indicate send this packet to FW
172  * @u.tx.trace.htt2_frm: flag (high-latency path only)
173  * @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
174  *                          + (MGMT_ACTION)] - 4 bits
175  * @u.tx.trace.dp_trace: flag (Datapath trace)
176  * @u.tx.trace.is_bcast: flag (Broadcast packet)
177  * @u.tx.trace.is_mcast: flag (Multicast packet)
178  * @u.tx.trace.packet_type: flag (Packet type)
179  * @u.tx.trace.print: enable packet logging
180  *
181  * @u.tx.vaddr: virtual address of ~
182  * @u.tx.paddr: physical/DMA address of ~
183  */
184 struct qdf_nbuf_cb {
185 	/* common */
186 	qdf_paddr_t paddr; /* of skb->data */
187 	/* valid only in one direction */
188 	union {
189 		/* Note: MAX: 40 bytes */
190 		struct {
191 			union {
192 				struct {
193 					void *ext_cb_ptr;
194 					void *fctx;
195 					uint16_t msdu_len : 14,
196 						 flag_intra_bss : 1,
197 						 ipa_smmu_map : 1;
198 					uint16_t peer_id;
199 					uint16_t protocol_tag;
200 					uint16_t flow_tag;
201 				} priv_cb_w;
202 				struct {
203 					/* ipa_owned bit is common between rx
204 					 * control block and tx control block.
205 					 * Do not change location of this bit.
206 					 */
207 					uint32_t ipa_owned:1,
208 						 peer_cached_buf_frm:1,
209 						 flush_ind:1,
210 						 packet_buf_pool:1,
211 						 l3_hdr_pad:3,
212 						 /* exception frame flag */
213 						 exc_frm:1,
214 						 ipa_smmu_map:1,
215 						 reo_dest_ind_or_sw_excpt:5,
216 						 lmac_id:2,
217 						 fr_ds:1,
218 						 to_ds:1,
219 						 logical_link_id:4,
220 						 band:3, /* accessed via QDF_NBUF_CB_RX_BAND() */
221 						 reserved1:7;
222 					uint32_t tcp_seq_num;
223 					uint32_t tcp_ack_num;
224 					union {
225 						struct {
226 							uint16_t msdu_len;
227 							uint16_t peer_id;
228 						} wifi3;
229 						struct {
230 							uint32_t map_index;
231 						} wifi2;
232 					} dp;
233 					unsigned char *lro_ctx;
234 				} priv_cb_m;
235 			} dev; /* RX platform-private overlay (_w/_m variants) */
236 			uint32_t lro_eligible:1,
237 				tcp_proto:1,
238 				tcp_pure_ack:1,
239 				ipv6_proto:1,
240 				ip_offset:7,
241 				tcp_offset:7,
242 				rx_ctx_id:4,
243 				fcs_err:1,
244 				is_raw_frame:1,
245 				num_elements_in_list:8;
246 			uint32_t tcp_udp_chksum:16,
247 				 tcp_win:16;
248 			uint32_t flow_id;
249 			uint8_t flag_chfrag_start:1,
250 				flag_chfrag_cont:1,
251 				flag_chfrag_end:1,
252 				flag_retry:1,
253 				flag_da_mcbc:1,
254 				flag_da_valid:1,
255 				flag_sa_valid:1,
256 				flag_is_frag:1;
257 			union {
258 				uint8_t packet_state;
259 				uint8_t dp_trace:1,
260 					packet_track:3,
261 					rsrvd:4;
262 			} trace;
263 			uint16_t vdev_id:8,
264 				 tid_val:4,
265 				 ftype:4;
266 		} rx; /* valid only while the buffer is on the RX path */
267 
268 		/* Note: MAX: 40 bytes */
269 		struct {
270 			union {
271 				struct {
272 					void *ext_cb_ptr;
273 					void *fctx;
274 				} priv_cb_w;
275 				struct {
276 					/* ipa_owned bit is common between rx
277 					 * control block and tx control block.
278 					 * Do not change location of this bit.
279 					 */
280 					struct {
281 						uint32_t owned:1,
282 							priv:31;
283 					} ipa;
284 					uint32_t data_attr;
285 					uint16_t desc_id;
286 					uint16_t mgmt_desc_id;
287 					struct {
288 						uint8_t bi_map:1,
289 							reserved:7;
290 					} dma_option;
291 					uint8_t flag_notify_comp:1,
292 						band:3, /* accessed via QDF_NBUF_CB_TX_BAND() */
293 						rsvd:4;
294 					uint8_t reserved[2];
295 				} priv_cb_m;
296 			} dev; /* TX platform-private overlay (_w/_m variants) */
297 			uint8_t ftype;
298 			uint8_t vdev_id;
299 			uint16_t len;
300 			union {
301 				struct {
302 					uint8_t flag_efrag:1,
303 						flag_nbuf:1,
304 						num:1,
305 						flag_chfrag_start:1,
306 						flag_chfrag_cont:1,
307 						flag_chfrag_end:1,
308 						flag_ext_header:1,
309 						is_critical:1;
310 				} bits;
311 				uint8_t u8;
312 			} flags;
313 			struct {
314 				uint8_t packet_state:7,
315 					is_packet_priv:1;
316 				uint8_t packet_track:3,
317 					to_fw:1,
318 					htt2_frm:1,
319 					proto_type:3;
320 				uint8_t dp_trace:1,
321 					is_bcast:1,
322 					is_mcast:1,
323 					packet_type:4,
324 					print:1;
325 			} trace;
326 			unsigned char *vaddr;
327 			qdf_paddr_t paddr;
328 		} tx; /* valid only while the buffer is on the TX path */
329 	} u;
330 }; /* struct qdf_nbuf_cb: MAX 48 bytes */
331 
/* Build-time guard: struct qdf_nbuf_cb must fit within skb->cb (48 bytes).
 * Kernels >= 4.16 provide sizeof_field(); older kernels use FIELD_SIZEOF().
 */
332 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
333 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
334 			(sizeof(struct qdf_nbuf_cb)) <=
335 			sizeof_field(struct sk_buff, cb));
336 #else
337 QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
338 			(sizeof(struct qdf_nbuf_cb)) <=
339 			FIELD_SIZEOF(struct sk_buff, cb));
340 #endif
341 
342 /*
343  *  access macros to qdf_nbuf_cb
344  *  Note: These macros can be used as L-values as well as R-values.
345  *        When used as R-values, they effectively function as "get" macros
346  *        When used as L_values, they effectively function as "set" macros
347  */
348 
349 #define QDF_NBUF_CB_PADDR(skb) \
350 	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
351 
/* RX-direction accessors: valid only while the nbuf is on the RX path */
352 #define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
353 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
354 #define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
355 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
356 #define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
357 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
358 #define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
359 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
360 #define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
361 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
362 #define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
363 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
364 #define QDF_NBUF_CB_RX_CTX_ID(skb) \
365 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
366 #define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
367 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
368 
369 #define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
370 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
371 #define QDF_NBUF_CB_RX_TCP_WIN(skb) \
372 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
373 
374 #define QDF_NBUF_CB_RX_FLOW_ID(skb) \
375 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
376 
377 #define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
378 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
379 #define QDF_NBUF_CB_RX_DP_TRACE(skb) \
380 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
381 
382 #define QDF_NBUF_CB_RX_FTYPE(skb) \
383 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
384 
385 #define QDF_NBUF_CB_RX_VDEV_ID(skb) \
386 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
387 
/* AMSDU chunking flags: first / middle / last MSDU of an AMSDU */
388 #define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
389 	(((struct qdf_nbuf_cb *) \
390 	((skb)->cb))->u.rx.flag_chfrag_start)
391 #define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
392 	(((struct qdf_nbuf_cb *) \
393 	((skb)->cb))->u.rx.flag_chfrag_cont)
394 #define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
395 		(((struct qdf_nbuf_cb *) \
396 		((skb)->cb))->u.rx.flag_chfrag_end)
397 
398 #define QDF_NBUF_CB_RX_DA_MCBC(skb) \
399 	(((struct qdf_nbuf_cb *) \
400 	((skb)->cb))->u.rx.flag_da_mcbc)
401 
402 #define QDF_NBUF_CB_RX_DA_VALID(skb) \
403 	(((struct qdf_nbuf_cb *) \
404 	((skb)->cb))->u.rx.flag_da_valid)
405 
406 #define QDF_NBUF_CB_RX_SA_VALID(skb) \
407 	(((struct qdf_nbuf_cb *) \
408 	((skb)->cb))->u.rx.flag_sa_valid)
409 
410 #define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
411 	(((struct qdf_nbuf_cb *) \
412 	((skb)->cb))->u.rx.flag_retry)
413 
414 #define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
415 	(((struct qdf_nbuf_cb *) \
416 	((skb)->cb))->u.rx.is_raw_frame)
417 
418 #define QDF_NBUF_CB_RX_FROM_DS(skb) \
419 	(((struct qdf_nbuf_cb *) \
420 	((skb)->cb))->u.rx.dev.priv_cb_m.fr_ds)
421 
422 #define QDF_NBUF_CB_RX_TO_DS(skb) \
423 	(((struct qdf_nbuf_cb *) \
424 	((skb)->cb))->u.rx.dev.priv_cb_m.to_ds)
425 
426 #define QDF_NBUF_CB_RX_TID_VAL(skb) \
427 	(((struct qdf_nbuf_cb *) \
428 	((skb)->cb))->u.rx.tid_val)
429 
430 #define QDF_NBUF_CB_RX_IS_FRAG(skb) \
431 	(((struct qdf_nbuf_cb *) \
432 	((skb)->cb))->u.rx.flag_is_frag)
433 
434 #define QDF_NBUF_CB_RX_FCS_ERR(skb) \
435 	(((struct qdf_nbuf_cb *) \
436 	((skb)->cb))->u.rx.fcs_err)
437 
438 #define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
439 	qdf_nbuf_set_state(skb, PACKET_STATE)
440 
/* TX-direction accessors: valid only while the nbuf is on the TX path */
441 #define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
442 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
443 
444 #define QDF_NBUF_CB_TX_FTYPE(skb) \
445 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
446 
447 #define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
448 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
449 #define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
450 		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
451 
452 /* Tx Flags Accessor Macros*/
453 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
454 	(((struct qdf_nbuf_cb *) \
455 		((skb)->cb))->u.tx.flags.bits.flag_efrag)
456 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
457 	(((struct qdf_nbuf_cb *) \
458 		((skb)->cb))->u.tx.flags.bits.flag_nbuf)
459 #define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
460 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
461 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
462 	(((struct qdf_nbuf_cb *) \
463 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
464 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
465 	(((struct qdf_nbuf_cb *) \
466 	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
467 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
468 		(((struct qdf_nbuf_cb *) \
469 		((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
470 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
471 		(((struct qdf_nbuf_cb *) \
472 		((skb)->cb))->u.tx.flags.bits.flag_ext_header)
473 #define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
474 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
475 
476 #define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
477 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
478 /* End of Tx Flags Accessor Macros */
479 
480 /* Tx trace accessor macros */
481 #define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
482 	(((struct qdf_nbuf_cb *) \
483 		((skb)->cb))->u.tx.trace.packet_state)
484 
485 #define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
486 	(((struct qdf_nbuf_cb *) \
487 		((skb)->cb))->u.tx.trace.is_packet_priv)
488 
489 #define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
490 	(((struct qdf_nbuf_cb *) \
491 		((skb)->cb))->u.tx.trace.packet_track)
492 
493 #define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
494 	(((struct qdf_nbuf_cb *) \
495 		((skb)->cb))->u.tx.trace.to_fw)
496 
497 #define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
498 		(((struct qdf_nbuf_cb *) \
499 			((skb)->cb))->u.rx.trace.packet_track)
500 
501 #define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
502 	(((struct qdf_nbuf_cb *) \
503 		((skb)->cb))->u.tx.trace.proto_type)
504 
505 #define QDF_NBUF_CB_TX_DP_TRACE(skb)\
506 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
507 
508 #define QDF_NBUF_CB_DP_TRACE_PRINT(skb)	\
509 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
510 
511 #define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)	\
512 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
513 
514 #define QDF_NBUF_CB_GET_IS_BCAST(skb)\
515 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
516 
517 #define QDF_NBUF_CB_GET_IS_MCAST(skb)\
518 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
519 
520 #define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
521 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
522 
523 #define QDF_NBUF_CB_SET_BCAST(skb) \
524 	(((struct qdf_nbuf_cb *) \
525 		((skb)->cb))->u.tx.trace.is_bcast = true)
526 
527 #define QDF_NBUF_CB_SET_MCAST(skb) \
528 	(((struct qdf_nbuf_cb *) \
529 		((skb)->cb))->u.tx.trace.is_mcast = true)
530 /* End of Tx trace accessor macros */
531 
532 #define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
533 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
534 #define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
535 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
536 
537 /* assume the OS provides a single fragment */
538 #define __qdf_nbuf_get_num_frags(skb)		   \
539 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
540 
541 #define __qdf_nbuf_reset_num_frags(skb) \
542 	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
543 
/* Accessors for the priv_cb_m overlay fields */
544 #define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
545 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num)
546 #define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \
547 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_ack_num)
548 #define QDF_NBUF_CB_RX_LRO_CTX(skb) \
549 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.lro_ctx)
550 
551 #define QDF_NBUF_CB_TX_IPA_OWNED(skb) \
552 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.owned)
553 #define QDF_NBUF_CB_TX_IPA_PRIV(skb) \
554 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.priv)
555 #define QDF_NBUF_CB_TX_DESC_ID(skb)\
556 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.desc_id)
557 #define QDF_NBUF_CB_MGMT_TXRX_DESC_ID(skb)\
558 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.mgmt_desc_id)
559 #define QDF_NBUF_CB_TX_DMA_BI_MAP(skb) \
560 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
561 	dma_option.bi_map)
562 #define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
563 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
564 	flag_notify_comp)
565 
566 #define QDF_NBUF_CB_TX_BAND(skb) \
567 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
568 	band)
569 
570 #define QDF_NBUF_CB_RX_PEER_ID(skb) \
571 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
572 	wifi3.peer_id)
573 
574 #define QDF_NBUF_CB_RX_PKT_LEN(skb) \
575 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
576 	wifi3.msdu_len)
577 
578 #define QDF_NBUF_CB_RX_MAP_IDX(skb) \
579 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
580 	wifi2.map_index)
581 
582 #define  QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
583 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
584 	 peer_cached_buf_frm)
585 
586 #define  QDF_NBUF_CB_RX_FLUSH_IND(skb) \
587 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.flush_ind)
588 
589 #define  QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
590 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
591 	 packet_buf_pool)
592 
593 #define  QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(skb) \
594 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
595 	 l3_hdr_pad)
596 
597 #define  QDF_NBUF_CB_RX_PACKET_EXC_FRAME(skb) \
598 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
599 	 exc_frm)
600 
601 #define  QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
602 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
603 	 ipa_smmu_map)
604 
605 #define  QDF_NBUF_CB_RX_PACKET_REO_DEST_IND_OR_SW_EXCPT(skb) \
606 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
607 	reo_dest_ind_or_sw_excpt)
608 
609 #define  QDF_NBUF_CB_RX_PACKET_LMAC_ID(skb) \
610 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
611 	lmac_id)
612 
613 #define QDF_NBUF_CB_RX_LOGICAL_LINK_ID(skb) \
614 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
615 	logical_link_id)
616 
617 #define QDF_NBUF_CB_RX_BAND(skb) \
618 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
619 	band)
620 
/* Thin wrappers used by the common nbuf API */
621 #define __qdf_nbuf_ipa_owned_get(skb) \
622 	QDF_NBUF_CB_TX_IPA_OWNED(skb)
623 
624 #define __qdf_nbuf_ipa_owned_set(skb) \
625 	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1)
626 
627 #define __qdf_nbuf_ipa_owned_clear(skb) \
628 	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 0)
629 
630 #define __qdf_nbuf_ipa_priv_get(skb)	\
631 	QDF_NBUF_CB_TX_IPA_PRIV(skb)
632 
633 #define __qdf_nbuf_ipa_priv_set(skb, priv) \
634 	(QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv))
635 
/* Note: QDF_NBUF_CB_TX_DATA_ATTR() is defined once, earlier in this file
 * with the other TX accessors; a second, identical (and therefore
 * redundant) definition that used to live here has been removed.
 */
639 #define __qdf_nbuf_data_attr_get(skb)		\
640 	QDF_NBUF_CB_TX_DATA_ATTR(skb)
641 #define __qdf_nbuf_data_attr_set(skb, data_attr) \
642 	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
643 
644 /**
645  * __qdf_nbuf_map_nbytes_single() - map nbytes
646  * @osdev: os device
647  * @buf: buffer
648  * @dir: direction
649  * @nbytes: number of bytes
650  *
 * Maps the skb data buffer for DMA and caches the resulting DMA address
 * in the nbuf control block (QDF_NBUF_CB_PADDR(buf)).
 *
651  * Return: QDF_STATUS
652  */
653 #ifdef A_SIMOS_DEVHOST
654 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
655 		qdf_device_t osdev, struct sk_buff *buf,
656 		qdf_dma_dir_t dir, int nbytes)
657 {
658 	qdf_dma_addr_t paddr;
659 
	/* Simulation host: no real DMA mapping is performed; the buffer's
	 * virtual address stands in for the DMA address and cannot fail.
	 */
660 	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
661 	return QDF_STATUS_SUCCESS;
662 }
663 #else
664 static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
665 		qdf_device_t osdev, struct sk_buff *buf,
666 		qdf_dma_dir_t dir, int nbytes)
667 {
668 	qdf_dma_addr_t paddr;
669 	QDF_STATUS ret;
670 
671 	/* assume that the OS only provides a single fragment */
672 	QDF_NBUF_CB_PADDR(buf) = paddr =
673 		dma_map_single(osdev->dev, buf->data,
674 			       nbytes, __qdf_dma_dir_to_os(dir));
675 	ret =  dma_mapping_error(osdev->dev, paddr) ?
676 		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	/* Only successful mappings are recorded in the byte accounting */
677 	if (QDF_IS_STATUS_SUCCESS(ret))
678 		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
679 					 dir, true);
680 	return ret;
681 }
682 #endif
683 /**
684  * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
685  * @osdev: os device
686  * @buf: buffer
687  * @dir: direction
688  * @nbytes: number of bytes
689  *
 * Undoes __qdf_nbuf_map_nbytes_single(). A zero cached DMA address in
 * the control block means the buffer was never mapped, in which case
 * this is a no-op. Note: the cached address is NOT cleared afterwards.
 *
690  * Return: none
691  */
692 #if defined(A_SIMOS_DEVHOST)
693 static inline void
694 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
695 			       qdf_dma_dir_t dir, int nbytes)
696 {
	/* simulation host: mapping was a no-op, so there is nothing to undo */
697 }
698 
699 #else
700 static inline void
701 __qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
702 			       qdf_dma_dir_t dir, int nbytes)
703 {
704 	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
705 
706 	if (qdf_likely(paddr)) {
	/* reverse the byte accounting done at map time, then unmap */
707 		__qdf_record_nbuf_nbytes(
708 			__qdf_nbuf_get_end_offset(buf), dir, false);
709 		dma_unmap_single(osdev->dev, paddr, nbytes,
710 				 __qdf_dma_dir_to_os(dir));
711 		return;
712 	}
713 }
714 #endif
715 
716 /**
717  * __qdf_nbuf_reset() - reset the buffer data and pointer
718  * @skb: Network buf instance
719  * @reserve: extra headroom (bytes) to reserve beyond NET_SKB_PAD
720  * @align: required alignment of skb->data (0 means no alignment)
721  *
 * Scrubs the entire data area and control block, then re-establishes the
 * default headroom plus the caller-requested reserve/alignment, leaving
 * the buffer empty and ready for reuse.
 *
722  * Return: none
723  */
724 static inline void
725 __qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
726 {
727 	int offset;
728 
	/* Grow data to span the whole buffer so the memset below scrubs
	 * every byte, then trim back to an empty buffer.
	 */
729 	skb_push(skb, skb_headroom(skb));
730 	skb_put(skb, skb_tailroom(skb));
731 	memset(skb->data, 0x0, skb->len);
732 	skb_trim(skb, 0);
	/* restore the kernel's default headroom and clear the control block */
733 	skb_reserve(skb, NET_SKB_PAD);
734 	memset(skb->cb, 0x0, sizeof(skb->cb));
735 
736 	/*
737 	 * The default is for netbuf fragments to be interpreted
738 	 * as wordstreams rather than bytestreams.
739 	 */
740 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
741 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
742 
743 	/*
744 	 * Align & make sure that the tail & data are adjusted properly
745 	 */
746 
747 	if (align) {
748 		offset = ((unsigned long)skb->data) % align;
749 		if (offset)
750 			skb_reserve(skb, align - offset);
751 	}
752 
753 	skb_reserve(skb, reserve);
754 }
755 
756 /**
757  * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
758  * @skb: skb pointer whose cb is updated with vdev id information
759  * @vdev_id: vdev id to be updated in cb
760  *
 * Writes the vdev id into the RX overlay of the control block, so this
 * is only meaningful for buffers travelling the RX path.
 *
761  * Return: void
762  */
763 static inline void
764 qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id)
765 {
766 	QDF_NBUF_CB_RX_VDEV_ID(skb) = vdev_id; /* RX overlay field */
767 }
768 
769 /**
770  * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
771  *
772  * This function initializes the nbuf alloc fail replenish timer.
773  *
774  * Return: void
775  */
776 void __qdf_nbuf_init_replenish_timer(void);
777 
778 /**
779  * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
780  *
781  * This function deinitializes the nbuf alloc fail replenish timer.
782  *
783  * Return: void
784  */
785 void __qdf_nbuf_deinit_replenish_timer(void);
786 
787 /**
788  * __qdf_nbuf_len() - return the amount of valid data in the skb
789  * @skb: Pointer to network buffer
790  *
791  * This API returns the amount of valid data in the skb, If there are frags
792  * then it returns total length.
793  *
794  * Return: network buffer length
795  */
796 static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
797 {
798 	int i, extra_frag_len = 0;
799 
800 	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
801 	if (i > 0)
802 		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
803 
804 	return extra_frag_len + skb->len;
805 }
806 
807 /**
808  * __qdf_nbuf_num_frags_init() - init extra frags
809  * @skb: sk buffer
810  *
 * Marks the nbuf as carrying no extra (HTC/HTT header) fragment.
 *
811  * Return: none
812  */
813 static inline
814 void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
815 {
816 	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
817 }
818 
819 /**
820  * __qdf_nbuf_push_head() - Push data in the front
821  * @skb: Pointer to network buffer
822  * @size: size to be pushed
823  *
824  * Return: New data pointer of this buf after data has been pushed,
825  *         or NULL if there is not enough room in this buf.
826  */
827 static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
828 {
	/* If the buffer is DMA-mapped (non-zero cached paddr), keep the
	 * cached DMA address in sync with the new skb->data position.
	 */
829 	if (QDF_NBUF_CB_PADDR(skb))
830 		QDF_NBUF_CB_PADDR(skb) -= size;
831 
832 	return skb_push(skb, size);
833 }
834 
835 
836 /**
837  * __qdf_nbuf_pull_head() - pull data out from the front
838  * @skb: Pointer to network buffer
839  * @size: size to be popped
840  *
841  * Return: New data pointer of this buf after data has been popped,
842  *	   or NULL if there is not sufficient data to pull.
843  */
844 static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
845 {
	/* If the buffer is DMA-mapped (non-zero cached paddr), keep the
	 * cached DMA address in sync with the new skb->data position.
	 */
846 	if (QDF_NBUF_CB_PADDR(skb))
847 		QDF_NBUF_CB_PADDR(skb) += size;
848 
849 	return skb_pull(skb, size);
850 }
851 
/**
 * qdf_nbuf_is_intra_bss() - get intra bss bit
 * @buf: Network buffer
 *
 * The intra-bss flag has no backing storage in this control-block
 * layout, so the query always reports "not intra-bss".
 *
 * Return: integer value - 0/1
 */
static inline int qdf_nbuf_is_intra_bss(struct sk_buff *buf)
{
	(void)buf;	/* flag not tracked on this platform */

	return 0;
}
862 
/**
 * qdf_nbuf_set_intra_bss() - set intra bss bit
 * @buf: Network buffer
 * @val: 0/1
 *
 * The intra-bss flag has no backing storage in this control-block
 * layout; the request is silently ignored.
 *
 * Return: void
 */
static inline void qdf_nbuf_set_intra_bss(struct sk_buff *buf, uint8_t val)
{
	/* intentionally a no-op on this platform */
	(void)buf;
	(void)val;
}
873 
874 /**
875  * qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
876  *
877  * This function initializes the nbuf alloc fail replenish timer.
 * Thin wrapper over the platform implementation declared above.
878  *
879  * Return: void
880  */
881 static inline void
882 qdf_nbuf_init_replenish_timer(void)
883 {
884 	__qdf_nbuf_init_replenish_timer();
885 }
886 
887 /**
888  * qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
889  *
890  * This function deinitializes the nbuf alloc fail replenish timer.
 * Thin wrapper over the platform implementation declared above.
891  *
892  * Return: void
893  */
894 static inline void
895 qdf_nbuf_deinit_replenish_timer(void)
896 {
897 	__qdf_nbuf_deinit_replenish_timer();
898 }
899 
/* Explicit data-cache invalidate/clean hooks and the dsb barrier are
 * no-ops in this build; targets that require manual cache maintenance
 * provide real implementations elsewhere (NOTE(review): confirm per-target).
 */
900 static inline void
901 __qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end) {}
902 
903 static inline void
904 __qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end) {}
905 
906 static inline void
907 __qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end) {}
908 
909 static inline void
910 __qdf_dsb(void) {}
911 
912 static inline void
913 __qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end) {}
914 
915 #endif /*_I_QDF_NBUF_M_H */
916