/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf_w.h
 *
 * This file provides platform-specific nbuf APIs.
 * Included by i_qdf_nbuf.h and should not be included
 * directly from other files.
 */

#ifndef _I_QDF_NBUF_W_H
#define _I_QDF_NBUF_W_H

/* ext_cb accessor macros and internal APIs */

#define QDF_NBUF_CB_EXT_CB(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.ext_cb_ptr)

#define __qdf_nbuf_set_ext_cb(skb, ref) \
	do { \
		QDF_NBUF_CB_EXT_CB((skb)) = (ref); \
	} while (0)

#define __qdf_nbuf_get_ext_cb(skb) \
	QDF_NBUF_CB_EXT_CB((skb))

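/*
 * Illustrative use of the ext_cb accessors (a minimal sketch, not taken
 * from the driver): the accessors stash and return an opaque pointer in
 * the tx cb area; "struct my_tx_ctx" below is a hypothetical caller-owned
 * type.
 *
 *	struct my_tx_ctx *ctx = get_tx_ctx();
 *
 *	__qdf_nbuf_set_ext_cb(skb, ctx);
 *	...
 *	ctx = (struct my_tx_ctx *)__qdf_nbuf_get_ext_cb(skb);
 */
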
/* fctx accessor macros and internal APIs */

#define QDF_NBUF_CB_RX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.fctx)

#define QDF_NBUF_CB_TX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.fctx)

#define QDF_NBUF_CB_RX_PEER_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.peer_id)

#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.msdu_len)

#define QDF_NBUF_CB_RX_INTRA_BSS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.flag_intra_bss)

#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \
	do { \
		QDF_NBUF_CB_RX_FCTX((skb)) = (ctx); \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_fctx(skb) \
		 QDF_NBUF_CB_RX_FCTX((skb))

#define __qdf_nbuf_set_intra_bss(skb, val) \
	((QDF_NBUF_CB_RX_INTRA_BSS((skb))) = val)

#define __qdf_nbuf_is_intra_bss(skb) \
	(QDF_NBUF_CB_RX_INTRA_BSS((skb)))

#define __qdf_nbuf_set_tx_fctx_type(skb, ctx, type) \
	do { \
		QDF_NBUF_CB_TX_FCTX((skb)) = (ctx); \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_fctx(skb) \
		 QDF_NBUF_CB_TX_FCTX((skb))

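/*
 * Illustrative use of the fctx accessors (a minimal sketch, not taken from
 * the driver): a flow/frame context pointer is stored together with a type
 * tag so the consumer knows how to interpret it later. The ftype accessors
 * (QDF_NBUF_CB_RX_FTYPE/QDF_NBUF_CB_TX_FTYPE) and the type values are
 * defined in the common nbuf headers; "ctx" and "ftype" below are
 * placeholders.
 *
 *	__qdf_nbuf_set_rx_fctx_type(skb, ctx, ftype);
 *	...
 *	ctx = __qdf_nbuf_get_rx_fctx(skb);
 */
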
#define QDF_NBUF_CB_RX_PROTOCOL_TAG(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.protocol_tag)

#define __qdf_nbuf_set_rx_protocol_tag(skb, val) \
		((QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) = val)

#define __qdf_nbuf_get_rx_protocol_tag(skb) \
		(QDF_NBUF_CB_RX_PROTOCOL_TAG((skb)))

#define QDF_NBUF_CB_RX_FLOW_TAG(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.flow_tag)

#define __qdf_nbuf_set_rx_flow_tag(skb, val) \
		((QDF_NBUF_CB_RX_FLOW_TAG((skb))) = val)

#define __qdf_nbuf_get_rx_flow_tag(skb) \
		(QDF_NBUF_CB_RX_FLOW_TAG((skb)))

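/*
 * Illustrative use of the rx protocol/flow tag accessors (a minimal sketch,
 * not taken from the driver): the tag values are typically derived from rx
 * metadata by the data path; "tag" and "flow_tag" below are placeholders.
 *
 *	__qdf_nbuf_set_rx_protocol_tag(skb, tag);
 *	tag = __qdf_nbuf_get_rx_protocol_tag(skb);
 *
 *	__qdf_nbuf_set_rx_flow_tag(skb, flow_tag);
 *	flow_tag = __qdf_nbuf_get_rx_flow_tag(skb);
 */
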
#define QDF_NBUF_CB_RX_FLOW_IDX_INVALID(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.flow_idx_invalid)

#define __qdf_nbuf_set_rx_flow_idx_invalid(skb, val) \
		((QDF_NBUF_CB_RX_FLOW_IDX_INVALID((skb))) = val)

#define __qdf_nbuf_get_rx_flow_idx_invalid(skb) \
		(QDF_NBUF_CB_RX_FLOW_IDX_INVALID((skb)))

#define QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.flow_idx_timeout)

#define __qdf_nbuf_set_rx_flow_idx_timeout(skb, val) \
		((QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb))) = val)

#define __qdf_nbuf_get_rx_flow_idx_timeout(skb) \
		(QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb)))

#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w. \
	ipa_smmu_map)

/**
 * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
 * @skb: skb pointer whose cb is updated with vdev id information
 * @vdev_id: vdev id to be updated in cb
 *
 * Return: void
 */
static inline void
qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id)
{
	/* Does not apply to WIN */
}

/**
 * __qdf_nbuf_push_head() - push data into the front of the buffer
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: New data pointer of this buf after data has been pushed.
 *         Note that skb_push() panics rather than returning NULL if
 *         there is not enough headroom.
 */
static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
{
	return skb_push(skb, size);
}

/**
 * __qdf_nbuf_pull_head() - pull data out from the front of the buffer
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: New data pointer of this buf after data has been pulled,
 *         or NULL if there is not sufficient data to pull.
 */
static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
{
	return skb_pull(skb, size);
}
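
/*
 * Illustrative use of the push/pull helpers (a minimal sketch, not taken
 * from the driver): prepend a hypothetical encap header and later strip it.
 * "struct my_hdr" and fill_my_hdr() are placeholders.
 *
 *	struct my_hdr *hdr;
 *
 *	hdr = (struct my_hdr *)__qdf_nbuf_push_head(skb, sizeof(*hdr));
 *	fill_my_hdr(hdr);
 *	...
 *	__qdf_nbuf_pull_head(skb, sizeof(*hdr));
 */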

/* Rx buffer replenish timer does not apply to WIN; these are no-op stubs */
static inline void qdf_nbuf_init_replenish_timer(void) {}
static inline void qdf_nbuf_deinit_replenish_timer(void) {}

/**
 * __qdf_nbuf_dma_inv_range() - invalidate data cache lines for the given range
 * @buf_start: start address of the range to invalidate
 * @buf_end: end address of the range to invalidate
 *
 * Return: none
 */
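
/**
 * __qdf_nbuf_dma_inv_range_no_dsb() - invalidate range, deferring the barrier
 * @buf_start: start address of the range to invalidate
 * @buf_end: end address of the range to invalidate
 *
 * Skips the trailing dsb where the kernel provides a no-dsb variant, so a
 * caller batching several cache operations can issue a single __qdf_dsb()
 * at the end; otherwise it falls back to the regular invalidate.
 *
 * Return: none
 */

/**
 * __qdf_nbuf_dma_clean_range_no_dsb() - clean (write back) range, deferring
 * the barrier
 * @buf_start: start address of the range to clean
 * @buf_end: end address of the range to clean
 *
 * Return: none
 */

/**
 * __qdf_dsb() - issue the data synchronization barrier deferred by the
 * *_no_dsb() variants, on architectures that require one
 *
 * Return: none
 */

/**
 * __qdf_nbuf_dma_clean_range() - clean (write back) data cache lines for the
 * given range
 * @buf_start: start address of the range to clean
 * @buf_end: end address of the range to clean
 *
 * Return: none
 */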
#if (defined(__LINUX_ARM_ARCH__) && !defined(DP_NO_CACHE_DESC_SUPPORT))
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
	dmac_inv_range(buf_start, buf_end);
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dmac_inv_range_no_dsb(buf_start, buf_end);
#else
	dmac_inv_range(buf_start, buf_end);
#endif
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dmac_clean_range_no_dsb(buf_start, buf_end);
#else
	dmac_clean_range(buf_start, buf_end);
#endif
}

static inline void
__qdf_dsb(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dsb(st);
#endif
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
	dmac_clean_range(buf_start, buf_end);
}
#elif defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
	dma_cache_inv((unsigned long)buf_start,
		      (unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
	dma_cache_inv((unsigned long)buf_start,
		      (unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
	dma_cache_wback((unsigned long)buf_start,
			(unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_dsb(void)
{
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
	dma_cache_wback((unsigned long)buf_start,
			(unsigned long)(buf_end - buf_start));
}
#else
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_dsb(void)
{
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
}
#endif
#endif /* _I_QDF_NBUF_W_H */