xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/li/dp_li_rx.h (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_LI_RX_H_
21 #define _DP_LI_RX_H_
22 
23 #include <dp_types.h>
24 #include <dp_rx.h>
25 #include "dp_li.h"
26 
/**
 * dp_rx_process_li() - Process Rx frames reaped from the REO destination ring
 * @int_ctx: per-interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring to reap
 * @reo_ring_num: REO destination ring number
 * @quota: maximum number of entries to process in this invocation
 *
 * Return: number of entries processed — presumably bounded by @quota;
 *         confirm against the definition in dp_li_rx.c
 */
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
30 
/**
 * dp_rx_chain_msdus_li() - Function to chain all msdus of a mpdu
 *			    to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);
44 
/**
 * dp_rx_desc_pool_init_li() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS on success, error QDF_STATUS otherwise
 */
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);
56 
/**
 * dp_rx_desc_pool_deinit_li() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);
68 
/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - Get corresponding Rx Desc
 *					address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc, filled on success
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc);
/**
 * dp_rx_get_reo_qdesc_addr_li(): API to get qdesc address of reo
 * entrance ring desc
 *
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer (unused in this Lithium implementation)
 * @tid: tid value (unused in this Lithium implementation)
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_li(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
103 
/**
 * dp_rx_desc_cookie_2_va_li() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
static inline
struct dp_rx_desc *dp_rx_desc_cookie_2_va_li(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
}
117 
/* Rx peer metadata word layout used on Lithium targets:
 * bits [21:16] carry the vdev id, bit [24] the offload flag.
 */
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16
#define DP_PEER_METADATA_OFFLOAD_MASK	0x01000000
#define DP_PEER_METADATA_OFFLOAD_SHIFT	24

/* Extract the vdev id field from a peer metadata word */
#define DP_PEER_METADATA_VDEV_ID_GET_LI(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

/* Extract the offload flag from a peer metadata word */
#define DP_PEER_METADATA_OFFLOAD_GET_LI(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK)	\
			>> DP_PEER_METADATA_OFFLOAD_SHIFT)
130 
131 static inline uint16_t
132 dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
133 {
134 	struct htt_rx_peer_metadata_v0 *metadata =
135 			(struct htt_rx_peer_metadata_v0 *)&peer_metadata;
136 
137 	return metadata->peer_id;
138 }
139 
/**
 * dp_rx_intrabss_handle_nawds_li() - Handle intra-BSS forwarding of a frame
 *				      copy for NAWDS
 * @soc: core DP main context
 * @ta_peer: transmitting (source) txrx peer handle
 * @nbuf_copy: copy of the received buffer to be forwarded
 * @tid_stats: per-TID rx stats to update
 *
 * Return: bool — semantics defined by the implementation in dp_li_rx.c;
 *         presumably true when @nbuf_copy was consumed (confirm there)
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
/**
 * dp_rx_prefetch_nbuf_data() - Prefetch the next nbuf and its rx TLV data
 * @nbuf: nbuf currently being processed
 * @next: next nbuf in the list; function is a no-op when NULL
 *
 * Warms the cache with the fields of @next (and its rx pkt TLVs) that the
 * rx path is about to touch, hiding memory latency of the next iteration.
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	struct rx_pkt_tlvs *pkt_tlvs;

	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spread across 2 cache lines hence below prefetch */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		pkt_tlvs = (struct rx_pkt_tlvs *)next->data;
		/* sa_idx, da_idx, l3_pad in RX msdu_end TLV */
		qdf_prefetch(pkt_tlvs);
		/* msdu_done in RX attention TLV */
		qdf_prefetch(&pkt_tlvs->attn_tlv);
		/* fr_ds & to_ds in RX MPDU start TLV */
		if (qdf_nbuf_is_rx_chfrag_end(nbuf))
			qdf_prefetch(&pkt_tlvs->mpdu_start_tlv);
	}
}
#else
/* Prefetch support compiled out: empty stub keeps call sites unchanged */
static inline
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
173 
174 #ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
175 /**
176  * dp_rx_cookie_2_va_rxdma_buf_prefetch() - function to prefetch the SW desc
177  * @soc: Handle to DP Soc structure
178  * @cookie: cookie used to lookup virtual address
179  *
180  * Return: prefetched Rx descriptor virtual address
181  */
182 static inline
183 void *dp_rx_cookie_2_va_rxdma_buf_prefetch(struct dp_soc *soc, uint32_t cookie)
184 {
185 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
186 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
187 	struct rx_desc_pool *rx_desc_pool;
188 	void *prefetch_desc;
189 
190 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
191 		return NULL;
192 
193 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
194 
195 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
196 		return NULL;
197 
198 	prefetch_desc = &soc->rx_desc_buf[pool_id].array[index].rx_desc;
199 	qdf_prefetch(prefetch_desc);
200 	return prefetch_desc;
201 }
202 
/**
 * dp_rx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_entries,
				    hal_ring_handle_t hal_ring_hdl,
				    hal_ring_desc_t *last_prefetched_hw_desc,
				    struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		/* prefetch the first two cache lines of the nbuf attached
		 * to the previously prefetched SW descriptor
		 */
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		/* advance both the SW desc (via the cookie in the HW desc)
		 * and the cached HW desc prefetch positions
		 */
		*last_prefetched_sw_desc = dp_rx_cookie_2_va_rxdma_buf_prefetch(soc, HAL_RX_REO_BUF_COOKIE_GET(*last_prefetched_hw_desc));
		*last_prefetched_hw_desc = hal_srng_dst_prefetch_next_cached_desc(hal_soc,
										  hal_ring_hdl,
										  (uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
/* Prefetch support compiled out: empty stub keeps call sites unchanged */
static inline
void dp_rx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t quota,
				    hal_ring_handle_t hal_ring_hdl,
				    hal_ring_desc_t *last_prefetched_hw_desc,
				    struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
244 
245 static inline
246 QDF_STATUS dp_peer_rx_reorder_queue_setup_li(struct dp_soc *soc,
247 					     struct dp_peer *peer,
248 					     int tid,
249 					     uint32_t ba_window_size)
250 {
251 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
252 
253 	if (!rx_tid->hw_qdesc_paddr)
254 		return QDF_STATUS_E_INVAL;
255 
256 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
257 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
258 		    soc->ctrl_psoc,
259 		    peer->vdev->pdev->pdev_id,
260 		    peer->vdev->vdev_id,
261 		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
262 		    1, ba_window_size)) {
263 			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
264 				    soc, tid);
265 			return QDF_STATUS_E_FAILURE;
266 		}
267 	}
268 
269 	return QDF_STATUS_SUCCESS;
270 }
271 #endif
272