xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_nbuf_w.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf_w.h
 *
 * This file provides platform-specific nbuf APIs.
 * It is included by i_qdf_nbuf.h and should not be
 * included directly by other files.
 */

#ifndef _I_QDF_NBUF_W_H
#define _I_QDF_NBUF_W_H

/* ext_cb accessor macros and internal APIs */

#define QDF_NBUF_CB_EXT_CB(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.ext_cb_ptr)

#define __qdf_nbuf_set_ext_cb(skb, ref) \
	do { \
		QDF_NBUF_CB_EXT_CB((skb)) = (ref); \
	} while (0)

#define __qdf_nbuf_get_ext_cb(skb) \
	QDF_NBUF_CB_EXT_CB((skb))

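/*
 * Illustrative usage (a sketch, not part of this header): a caller that
 * owns the skb during tx processing can stash an opaque per-packet
 * context in ext_cb and recover it later. The context type
 * "struct my_tx_ctx" and the variable names below are assumptions made
 * purely for this example.
 *
 *	struct my_tx_ctx *ctx = my_tx_ctx_alloc();
 *
 *	__qdf_nbuf_set_ext_cb(skb, ctx);
 *	...
 *	ctx = (struct my_tx_ctx *)__qdf_nbuf_get_ext_cb(skb);
 */
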
/* fctx accessor macros and internal APIs */

#define QDF_NBUF_CB_RX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.fctx)

#define QDF_NBUF_CB_TX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.fctx)

#define QDF_NBUF_CB_RX_PEER_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.peer_id)

#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.msdu_len)

#define QDF_NBUF_CB_RX_INTRA_BSS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.flag_intra_bss)

#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \
	do { \
		QDF_NBUF_CB_RX_FCTX((skb)) = (ctx); \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_fctx(skb) \
		 QDF_NBUF_CB_RX_FCTX((skb))

#define __qdf_nbuf_set_intra_bss(skb, val) \
	((QDF_NBUF_CB_RX_INTRA_BSS((skb))) = val)

#define __qdf_nbuf_is_intra_bss(skb) \
	(QDF_NBUF_CB_RX_INTRA_BSS((skb)))

#define __qdf_nbuf_set_tx_fctx_type(skb, ctx, type) \
	do { \
		QDF_NBUF_CB_TX_FCTX((skb)) = (ctx); \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_fctx(skb) \
		 QDF_NBUF_CB_TX_FCTX((skb))

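/*
 * Illustrative usage (a sketch, not part of this header): the fctx
 * accessors pair an opaque flow context pointer with a frame type so
 * the consumer knows how to interpret it, and the intra-BSS flag marks
 * rx frames forwarded back into the same BSS. "flow_ctx", "ftype" and
 * forward_within_bss() below are placeholders assumed only for this
 * example.
 *
 *	__qdf_nbuf_set_rx_fctx_type(skb, flow_ctx, ftype);
 *	...
 *	flow_ctx = __qdf_nbuf_get_rx_fctx(skb);
 *
 *	__qdf_nbuf_set_intra_bss(skb, 1);
 *	if (__qdf_nbuf_is_intra_bss(skb))
 *		forward_within_bss(skb);
 */
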
#define QDF_NBUF_CB_RX_PROTOCOL_TAG(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.protocol_tag)

#define __qdf_nbuf_set_rx_protocol_tag(skb, val) \
		((QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) = val)

#define __qdf_nbuf_get_rx_protocol_tag(skb) \
		(QDF_NBUF_CB_RX_PROTOCOL_TAG((skb)))

#define QDF_NBUF_CB_RX_FLOW_TAG(skb) \
		(((struct qdf_nbuf_cb *) \
		((skb)->cb))->u.rx.dev.priv_cb_w.flow_tag)

#define __qdf_nbuf_set_rx_flow_tag(skb, val) \
		((QDF_NBUF_CB_RX_FLOW_TAG((skb))) = val)

#define __qdf_nbuf_get_rx_flow_tag(skb) \
		(QDF_NBUF_CB_RX_FLOW_TAG((skb)))

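/*
 * Illustrative usage (a sketch, not part of this header): the protocol
 * and flow tags cache classification results computed in the rx path so
 * later stages can reuse them without reparsing the frame. "proto_tag"
 * and "flow_tag" below are locals assumed only for this example.
 *
 *	__qdf_nbuf_set_rx_protocol_tag(skb, proto_tag);
 *	proto_tag = __qdf_nbuf_get_rx_protocol_tag(skb);
 *
 *	__qdf_nbuf_set_rx_flow_tag(skb, flow_tag);
 *	flow_tag = __qdf_nbuf_get_rx_flow_tag(skb);
 */
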
#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w. \
	ipa_smmu_map)

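/*
 * Illustrative usage (a sketch, not part of this header): this flag has
 * no set/get wrappers here, so callers read and write it directly, for
 * example to note whether an rx buffer is currently SMMU-mapped for IPA
 * and must be unmapped before it is freed. The surrounding logic is an
 * assumption made only for this example (ipa_smmu_unmap_nbuf() is
 * hypothetical).
 *
 *	QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) = 1;
 *	...
 *	if (QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb))
 *		ipa_smmu_unmap_nbuf(skb);
 */
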
/**
 * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
 * @skb: skb pointer whose cb is updated with vdev id information
 * @vdev_id: vdev id to be updated in cb
 *
 * Return: void
 */
static inline void
qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id)
{
	/* Does not apply to WIN */
}

/**
 * __qdf_nbuf_push_head() - Push data in the front
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: New data pointer of this buf after data has been pushed,
 *         or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
{
	return skb_push(skb, size);
}

/**
 * __qdf_nbuf_pull_head() - pull data out from the front
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: New data pointer of this buf after data has been popped,
 * or NULL if there is not sufficient data to pull.
 */
static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
{
	return skb_pull(skb, size);
}

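/*
 * Illustrative usage (a sketch, not part of this header): push grows the
 * frame at the front, e.g. to prepend a header, and pull shrinks it,
 * e.g. to strip one. Using sizeof(struct ethhdr) as the size is only an
 * example choice.
 *
 *	uint8_t *hdr;
 *
 *	hdr = __qdf_nbuf_push_head(skb, sizeof(struct ethhdr));
 *	... populate hdr ...
 *	__qdf_nbuf_pull_head(skb, sizeof(struct ethhdr));
 */
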
static inline void qdf_nbuf_init_replenish_timer(void) {}
static inline void qdf_nbuf_deinit_replenish_timer(void) {}

/**
 * __qdf_nbuf_dma_inv_range() - invalidate the data cache for a buffer range
 * @buf_start: start address of the range to invalidate
 * @buf_end: end address of the range to invalidate
 *
 * Return: none
 */
#if (defined(__LINUX_ARM_ARCH__) && !defined(DP_NO_CACHE_DESC_SUPPORT))
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
	dmac_inv_range(buf_start, buf_end);
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dmac_inv_range_no_dsb(buf_start, buf_end);
#else
	dmac_inv_range(buf_start, buf_end);
#endif
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dmac_clean_range_no_dsb(buf_start, buf_end);
#else
	dmac_clean_range(buf_start, buf_end);
#endif
}

static inline void
__qdf_dsb(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
	dsb(st);
#endif
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
	dmac_clean_range(buf_start, buf_end);
}
#elif defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
	dma_cache_inv((unsigned long)buf_start,
		      (unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
	dma_cache_inv((unsigned long)buf_start,
		      (unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
	dma_cache_wback((unsigned long)buf_start,
			(unsigned long)(buf_end - buf_start));
}

static inline void
__qdf_dsb(void)
{
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
	dma_cache_wback((unsigned long)buf_start,
			(unsigned long)(buf_end - buf_start));
}
#else
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
}

static inline void
__qdf_dsb(void)
{
}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end)
{
}
#endif
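
/*
 * Illustrative usage (a sketch, not part of this header): a typical
 * pattern is to invalidate an nbuf's data range before the CPU reads
 * DMA-written contents, or to clean it after the CPU writes data the
 * device will read. "len" below is a length assumed only for this
 * example.
 *
 *	__qdf_nbuf_dma_inv_range(skb->data, skb->data + len);
 *
 * The *_no_dsb variants let a caller batch several ranges and issue a
 * single barrier at the end with __qdf_dsb().
 */
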
#endif /* _I_QDF_NBUF_W_H */