/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_module.h>
#include "hal_be_api.h"
#include "hal_be_hw_headers.h"
#include "hal_be_reo.h"
#include "hal_tx.h"	//HAL_SET_FLD
#include "hal_be_rx.h"	//HAL_RX_BUF_RBM_GET
#include "rx_reo_queue_1k.h"
#include "hal_be_rx_tlv.h"

/*
 * The 4-bit REO destination ring value is defined as: 0: TCL,
 * 1: SW1, 2: SW2, 3: SW3, 4: SW4, 5: Release, 6: FW(WIFI), 7: SW5,
 * 8: SW6, 9: SW7, 10: SW8, 11: NOT_USED.
 */
uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
				  REO_REMAP_SW3, REO_REMAP_SW4,
				  REO_REMAP_SW5, REO_REMAP_SW6,
				  REO_REMAP_SW7, REO_REMAP_SW8};
/*
 * WBM idle link descriptor for Return Buffer Manager in case of
 * multi-chip configuration.
 */
#define HAL_NUM_CHIPS 4
#define HAL_WBM_CHIP_INVALID	    0
#define HAL_WBM_CHIP0_IDLE_DESC_MAP 1
#define HAL_WBM_CHIP1_IDLE_DESC_MAP 2
#define HAL_WBM_CHIP2_IDLE_DESC_MAP 3
#define HAL_WBM_CHIP3_IDLE_DESC_MAP 12

uint8_t wbm_idle_link_bm_map[] = {HAL_WBM_CHIP0_IDLE_DESC_MAP,
				  HAL_WBM_CHIP1_IDLE_DESC_MAP,
				  HAL_WBM_CHIP2_IDLE_DESC_MAP,
				  HAL_WBM_CHIP3_IDLE_DESC_MAP};

#if defined(QDF_BIG_ENDIAN_MACHINE)
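/**
 * hal_setup_reo_swap() - Set REO read/write struct swap bits
 * @soc: HAL SoC context
 *
 * Enables the WRITE_STRUCT_SWAP and READ_STRUCT_SWAP bits in the REO
 * cache control register on big-endian hosts; a no-op otherwise.
 *
 * Return: none
 */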
void hal_setup_reo_swap(struct hal_soc *soc)
{
	uint32_t reg_val;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE));

	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);

	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE), reg_val);
}
#else
void hal_setup_reo_swap(struct hal_soc *soc)
{
}
#endif

/**
 * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
 * @hal_soc_hdl: Handle to HAL SoC structure
 * @hal_ring_hdl: Handle to HAL SRNG structure
 *
 * Return: none
 */
static void
hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
}

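/**
 * hal_reo_setup_generic_be() - Initialize common HW REO registers
 * @soc: HAL SoC context
 * @reoparams: pointer to struct hal_reo_params holding the requested config
 * @qref_reset: unused in the generic BE implementation
 *
 * Reads the REO general enable register and applies the requested
 * configuration through hal_reo_config(), programs the per-AC aging
 * thresholds and, when rx_hash_enabled is set, the hash based destination
 * ring remap registers (DESTINATION_RING_CTRL_IX_2/3).
 *
 * Return: none
 */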
void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams,
			      int qref_reset)
{
	uint32_t reg_val;
	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
		REO_REG_REG_BASE));

	hal_reo_config(soc, reg_val, reo_params);
	/* Other ring enable bits and REO_ENABLE will be set by FW */

	/* TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	/* Set the reo descriptor swap bits in case of BIG endian platform */
	hal_setup_reo_swap(soc);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look-up the
	 * ring ID configured in Destination_Ring_Ctrl_IX_* register.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set-up the 16 entries to
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry, which are mapped as follows:
	 * 0: TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7: NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap1);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
				       REO_REG_REG_BASE)));

		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap2);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
				       REO_REG_REG_BASE)));
	}

	/* TODO: Check if the following registers should be setup by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}

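/**
 * hal_set_link_desc_addr_be() - Populate a buffer_addr_info descriptor
 * @desc: pointer to the link descriptor (buffer_addr_info) to fill
 * @cookie: SW cookie to embed in the descriptor
 * @link_desc_paddr: 40-bit physical address of the link descriptor
 * @bm_id: return buffer manager ID to program in the descriptor
 *
 * Return: none
 */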
void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
			       qdf_dma_addr_t link_desc_paddr,
			       uint8_t bm_id)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
			   bm_id);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
			   cookie);
}

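/**
 * hal_get_rx_max_ba_window_be() - Get the maximum supported RX BA window size
 * @tid: TID for which the BA window size is queried (unused here)
 *
 * Return: HAL_RX_BA_WINDOW_256
 */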
static uint16_t hal_get_rx_max_ba_window_be(int tid)
{
	return HAL_RX_BA_WINDOW_256;
}

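/**
 * hal_get_reo_qdesc_size_be() - Get REO queue descriptor size for a BA window
 * @ba_window_size: requested block-ack window size
 * @tid: TID for which the queue descriptor is being sized
 *
 * Return: size in bytes of rx_reo_queue plus the number of
 *	   rx_reo_queue_ext structures needed for the window size
 */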
static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
{
	/* Hardcode ba_window_size to the maximum BA window for all TIDs
	 * other than HAL_NON_QOS_TID until HW issues are resolved.
	 */
	if (tid != HAL_NON_QOS_TID)
		ba_window_size = hal_get_rx_max_ba_window_be(tid);

	/* Return descriptor size corresponding to window size of 2 since
	 * we set ba_window_size to 2 while setting up REO descriptors as
	 * a WAR to get 2k jump exception when aggregates are received
	 * without a BA session.
	 */
	if (ba_window_size <= 1) {
		if (tid != HAL_NON_QOS_TID)
			return sizeof(struct rx_reo_queue) +
				sizeof(struct rx_reo_queue_ext);
		else
			return sizeof(struct rx_reo_queue);
	}

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}

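/**
 * hal_rx_msdu_ext_desc_info_get_ptr_be() - Get MSDU ext desc info pointer
 * @msdu_details_ptr: pointer to the rx_msdu_details structure
 *
 * Return: pointer to the MSDU extension descriptor info within the
 *	   MSDU details
 */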
void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
{
	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
}

#if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
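/**
 * hal_wbm2sw_release_source_get() - Get the release source from a WBM2SW desc
 * @hal_desc: WBM2SW release ring descriptor
 * @dir: expected release direction (TX or RX completion)
 *
 * Translates the HW release source encoding to the generic
 * HAL_RX_WBM_ERR_SRC_* values and asserts if the source does not match
 * the expected direction.
 *
 * Return: release source module (HAL_RX_WBM_ERR_SRC_*)
 */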
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	uint32_t buf_src;

	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
	switch (buf_src) {
	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
		return HAL_RX_WBM_ERR_SRC_RXDMA;
	case HAL_BE_RX_WBM_ERR_SRC_REO:
		return HAL_RX_WBM_ERR_SRC_REO;
	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	case HAL_BE_RX_WBM_ERR_SRC_TQM:
		return HAL_RX_WBM_ERR_SRC_TQM;
	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	default:
		qdf_assert_always(0);
	}

	return buf_src;
}
#else
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
}
#endif

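/**
 * hal_tx_comp_get_buffer_source_generic_be() - Get TX completion release source
 * @hal_desc: WBM2SW TX completion ring descriptor
 *
 * Return: buffer release source module (HAL_RX_WBM_ERR_SRC_*)
 */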
uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
{
	return hal_wbm2sw_release_source_get(hal_desc,
					     HAL_BE_WBM_RELEASE_DIR_TX);
}

/**
 * hal_tx_comp_get_release_reason_generic_be() - Get TQM release reason
 * @hal_desc: completion ring descriptor pointer
 *
 * This function extracts the TQM release reason from the WBM2SW TX
 * completion ring descriptor.
 *
 * Return: TQM release reason
 */
static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
{
	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);

	return (comp_desc &
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
}

/**
 * hal_get_wbm_internal_error_generic_be() - Check for WBM internal error
 * @hal_desc: completion ring descriptor pointer
 *
 * This function returns whether the descriptor indicates a WBM internal
 * error.
 *
 * Return: 1 if a WBM internal error is indicated, 0 otherwise
 */
static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
{
	/*
	 * TODO - This func is called by tx comp and wbm error handler
	 * Check if one needs to use WBM2SW-TX and other WBM2SW-RX
	 */
	uint32_t comp_desc =
		*(uint32_t *)(((uint8_t *)hal_desc) +
			      HAL_WBM_INTERNAL_ERROR_OFFSET);

	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
		HAL_WBM_INTERNAL_ERROR_LSB;
}

/**
 * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
 * @ring_desc: ring descriptor
 *
 * Return: wbm error source
 */
static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
{
	return hal_wbm2sw_release_source_get(ring_desc,
					     HAL_BE_WBM_RELEASE_DIR_RX);
}

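/**
 * hal_rx_ret_buf_manager_get_be() - Get the return buffer manager (RBM)
 * @ring_desc: ring descriptor holding the buffer_addr_info
 *
 * Return: return buffer manager value from the descriptor
 */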
uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as argument, but since
	 * buf_addr_info is the first field in ring_desc, the following
	 * call is OK.
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}

#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)

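/**
 * hal_rx_wbm_err_info_get_generic_be() - Extract WBM error info from desc
 * @wbm_desc: WBM2SW release ring descriptor
 * @wbm_er_info1: pointer to struct hal_wbm_err_desc_info to be filled
 *
 * Fills the WBM error source, REO push reason/error code and RXDMA push
 * reason/error code from the descriptor.
 *
 * Return: none
 */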
void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
{
	struct hal_wbm_err_desc_info *wbm_er_info =
		(struct hal_wbm_err_desc_info *)wbm_er_info1;

	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
}

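/**
 * hal_rx_reo_buf_paddr_get_be() - Get buffer physical address and SW cookie
 * @rx_desc: REO destination ring descriptor
 * @buf_info: structure to be filled with paddr and sw_cookie
 *
 * Return: none
 */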
static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
					struct hal_buf_info *buf_info)
{
	struct reo_destination_ring *reo_ring =
		 (struct reo_destination_ring *)rx_desc;

	buf_info->paddr =
	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
}

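/**
 * hal_rx_msdu_link_desc_set_be() - Fill a WBM release ring entry to return
 *				    an MSDU link descriptor to WBM
 * @hal_soc_hdl: HAL SoC handle
 * @src_srng_desc: WBM release ring descriptor to fill
 * @buf_addr_info: buffer address info of the link descriptor being released
 * @bm_action: buffer manager action to program in the entry
 *
 * Return: none
 */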
static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
					 void *src_srng_desc,
					 hal_buff_addrinfo_t buf_addr_info,
					 uint8_t bm_action)
{
	/*
	 * The offsets for fields used in this function are the same in
	 * wbm_release_ring for Lithium and wbm_release_ring_tx for
	 * Beryllium, hence we can use wbm_release_ring directly.
	 */
	struct wbm_release_ring *wbm_rel_srng =
			(struct wbm_release_ring *)src_srng_desc;
	uint32_t addr_31_0;
	uint8_t addr_39_32;

	/* Structure copy !!! */
	wbm_rel_srng->released_buff_or_desc_addr_info =
				*((struct buffer_addr_info *)buf_addr_info);

	addr_31_0 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
	addr_39_32 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;

	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
			   bm_action);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   BUFFER_OR_DESC_TYPE,
			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);

	/* WBM indicates an error when any link descriptor given to it has a
	 * NULL address. One of those paths is the link descriptors released
	 * from the host after processing RXDMA errors or from the Rx defrag
	 * path, so assert here to ensure the host is not releasing
	 * descriptors with a NULL address.
	 */

	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
		hal_dump_wbm_rel_desc(src_srng_desc);
		qdf_assert_always(0);
	}
}

/**
 * hal_rx_buf_cookie_rbm_get_be() - Get the cookie and return buffer
 *                                  manager from the REO entrance ring desc
 * @buf_addr_info_hdl: Buffer address info element from ring desc
 * @buf_info_hdl: structure to return the buffer information
 *
 * Return: void
 */
static
void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
				  hal_buf_info_t buf_info_hdl)
{
	struct hal_buf_info *buf_info =
		(struct hal_buf_info *)buf_info_hdl;
	struct buffer_addr_info *buf_addr_info =
		(struct buffer_addr_info *)buf_addr_info_hdl;

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
	/*
	 * buffer addr info is the first member of ring desc, so the typecast
	 * can be done.
	 */
	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
						(hal_ring_desc_t)buf_addr_info);
}

/**
 * hal_rx_en_mcast_fp_data_filter_generic_be() - Is mcast filter pass enabled
 *
 * Return: true by default for BE WIN
 */
static inline
bool hal_rx_en_mcast_fp_data_filter_generic_be(void)
{
	return true;
}

/**
 * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
 *				       rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as an index to the SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 */
static inline void
hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
				qdf_dma_addr_t paddr, uint32_t cookie,
				uint8_t manager)
{
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}

/**
 * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
 * @rx_desc: rx descriptor
 *
 * Return: REO error code
 */
static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
{
	struct reo_destination_ring *reo_desc =
			(struct reo_destination_ring *)rx_desc;

	return HAL_RX_REO_ERROR_GET(reo_desc);
}

/**
 * hal_gen_reo_remap_val_generic_be() - Generate the reo map value
 * @remap_reg: remap register
 * @ix0_map: mapping values for reo
 *
 * Return: IX0 reo remap register value to be written
 */
static uint32_t
hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
				 uint8_t *ix0_map)
{
	uint32_t ix_val = 0;

	switch (remap_reg) {
	case HAL_REO_REMAP_REG_IX0:
		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
			HAL_REO_REMAP_IX0(ix0_map[7], 7);
		break;
	case HAL_REO_REMAP_REG_IX2:
		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
			HAL_REO_REMAP_IX2(ix0_map[7], 23);
		break;
	default:
		break;
	}

	return ix_val;
}

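/**
 * hal_rx_err_status_get_be() - Get error status from a REO ring descriptor
 * @rx_desc: REO destination ring descriptor
 *
 * Return: error status value extracted from the descriptor
 */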
static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_ERROR_STATUS_GET(rx_desc);
}

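/**
 * hal_reo_status_update_be() - Parse a REO status ring TLV into SW state
 * @hal_soc_hdl: HAL SoC handle
 * @reo_desc: REO status ring descriptor
 * @st_handle: pointer to union hal_reo_status to be filled
 * @tlv: TLV type found in the status ring descriptor
 * @num_ref: filled with the command number of the parsed status
 *
 * Return: QDF_STATUS_SUCCESS if the TLV is handled, QDF_STATUS_E_FAILURE
 *	   otherwise
 */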
static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_desc_t reo_desc,
					   void *st_handle,
					   uint32_t tlv, int *num_ref)
{
	union hal_reo_status *reo_status_ref;

	reo_status_ref = (union hal_reo_status *)st_handle;

	switch (tlv) {
	case HAL_REO_QUEUE_STATS_STATUS_TLV:
		hal_reo_queue_stats_status_be(reo_desc,
					      &reo_status_ref->queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
		hal_reo_flush_queue_status_be(reo_desc,
					      &reo_status_ref->fl_queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
		hal_reo_flush_cache_status_be(reo_desc,
					      &reo_status_ref->fl_cache_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
		break;
	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
		hal_reo_unblock_cache_status_be
			(reo_desc, hal_soc_hdl,
			 &reo_status_ref->unblk_cache_status);
		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
		break;
	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
		hal_reo_flush_timeout_list_status_be(
					reo_desc,
					&reo_status_ref->fl_timeout_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
		break;
	case HAL_REO_DESC_THRES_STATUS_TLV:
		hal_reo_desc_thres_reached_status_be(
						reo_desc,
						&reo_status_ref->thres_status,
						hal_soc_hdl);
		*num_ref = reo_status_ref->thres_status.header.cmd_num;
		break;
	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
		hal_reo_rx_update_queue_status_be(
					reo_desc,
					&reo_status_ref->rx_queue_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
			  "hal_soc %pK: no handler for TLV:%d",
			   hal_soc_hdl, tlv);
		return QDF_STATUS_E_FAILURE;
	} /* switch */

	return QDF_STATUS_SUCCESS;
}

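/**
 * hal_rx_reo_buf_type_get_be() - Get buffer/descriptor type from REO desc
 * @rx_desc: REO destination ring descriptor
 *
 * Return: buffer type field from the descriptor
 */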
static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}

#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
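/**
 * hal_cookie_conversion_reg_cfg_be() - Configure HW cookie conversion
 * @hal_soc_hdl: HAL SoC handle
 * @cc_cfg: HW cookie conversion configuration
 *
 * Dispatches to the chip specific hal_cookie_conversion_reg_cfg_be op.
 *
 * Return: none
 */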
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
				      struct hal_hw_cc_config *cc_cfg)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);

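/**
 * hal_msdu_desc_info_set_be() - Populate the MSDU descriptor info
 * @hal_soc_hdl: HAL SoC handle
 * @msdu_desc: pointer to the rx_msdu_desc_info to fill
 * @dst_ind: REO destination indication for the extension descriptor
 * @nbuf_len: MSDU length to program
 *
 * Marks the MSDU as first and last in the MPDU, clears continuation,
 * sets SA/DA valid and programs the REO destination indication in the
 * MSDU extension descriptor that immediately follows the MSDU desc info.
 *
 * Return: none
 */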
static inline void
hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *msdu_desc, uint32_t dst_ind,
			  uint32_t nbuf_len)
{
	struct rx_msdu_desc_info *msdu_desc_info =
		(struct rx_msdu_desc_info *)msdu_desc;
	struct rx_msdu_ext_desc_info *msdu_ext_desc_info =
		(struct rx_msdu_ext_desc_info *)(msdu_desc_info + 1);

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_LENGTH, nbuf_len);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);
	HAL_RX_MSDU_REO_DST_IND_SET(msdu_ext_desc_info,
				    REO_DESTINATION_INDICATION, dst_ind);
}

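/**
 * hal_mpdu_desc_info_set_be() - Populate the MPDU descriptor info
 * @hal_soc_hdl: HAL SoC handle
 * @ent_desc: REO entrance ring descriptor to carry the sequence number
 * @mpdu_desc: pointer to the rx_mpdu_desc_info to fill
 * @seq_no: MPDU sequence number to program
 *
 * Sets the MPDU sequence number in the entrance ring descriptor and marks
 * the MPDU as a single-MSDU fragment (MSDU_COUNT = 1, FRAGMENT_FLAG = 1,
 * RAW_MPDU = 0).
 *
 * Return: none
 */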
static inline void
hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *ent_desc,
			  void *mpdu_desc,
			  uint32_t seq_no)
{
	struct rx_mpdu_desc_info *mpdu_desc_info =
			(struct rx_mpdu_desc_info *)mpdu_desc;
	uint8_t *desc = (uint8_t *)ent_desc;

	HAL_RX_FLD_SET(desc, REO_ENTRANCE_RING,
		       MPDU_SEQUENCE_NUMBER, seq_no);

	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  FRAGMENT_FLAG, 0x1);
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  RAW_MPDU, 0x0);
}

/**
 * hal_rx_msdu_reo_dst_ind_get_be() - Gets the REO destination ring ID
 *                                    from the msdu desc info
 * @hal_soc_hdl: hal_soc handle
 * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
 *                  the current descriptor
 *
 * Return: dst_ind (REO destination ring ID)
 */
static inline
uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
					void *msdu_link_desc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct rx_msdu_details *msdu_details;
	struct rx_msdu_desc_info *msdu_desc_info;
	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
	uint32_t dst_ind;

	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);

	/* The first msdu in the link should exist */
	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
							   hal_soc);
	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
	return dst_ind;
}

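/**
 * hal_reo_ix_remap_value_get_be() - Build a REO IX remap value from a ring mask
 * @hal_soc_hdl: HAL SoC handle
 * @rx_ring_mask: bitmask of enabled REO2SW rings
 *
 * Distributes the enabled rings round-robin across the eight remap entries
 * of one IX register; if the mask is empty, all entries are set to
 * REO_REMAP_RELEASE.
 *
 * Return: 32-bit REO remap register value
 */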
uint32_t
hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
			      uint8_t rx_ring_mask)
{
	uint32_t num_rings = 0;
	uint32_t i = 0;
	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
	uint32_t reo_remap_val = 0;
	uint32_t ring_idx = 0;
	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};

	/* create reo ring remap array */
	while (i < HAL_MAX_REO2SW_RINGS) {
		if (rx_ring_mask & (1 << i)) {
			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
			num_rings++;
		}
		i++;
	}

	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
		if (rx_ring_mask) {
			ix_map[i] = ring_remap_arr[ring_idx];
			ring_idx = ((ring_idx + 1) % num_rings);
		} else {
			/* if ring mask is zero configure to release to WBM */
			ix_map[i] = REO_REMAP_RELEASE;
		}
	}

	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
					  HAL_REO_REMAP_IX0(ix_map[7], 7);

	return reo_remap_val;
}

qdf_export_symbol(hal_reo_ix_remap_value_get_be);

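/**
 * hal_reo_ring_remap_value_get_be() - Get REO remap value for a REO2SW ring
 * @rx_ring_id: REO2SW ring index
 *
 * Return: REO_REMAP_SW* value for the ring, or REO_REMAP_RELEASE if the
 *	   ring index is out of range
 */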
uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
{
	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
		return REO_REMAP_RELEASE;

	return reo_dest_ring_remap[rx_ring_id];
}

qdf_export_symbol(hal_reo_ring_remap_value_get_be);

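/**
 * hal_get_idle_link_bm_id_be() - Get WBM idle link desc buffer manager ID
 * @chip_id: chip index
 *
 * Return: idle link descriptor RBM for the chip, or HAL_WBM_CHIP_INVALID
 *	   if chip_id is out of range
 */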
uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
{
	if (chip_id >= HAL_NUM_CHIPS)
		return HAL_WBM_CHIP_INVALID;

	return wbm_idle_link_bm_map[chip_id];
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
		buf_info->paddr =
			(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
	else
		buf_info->paddr =
			(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#else
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif
#else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hal_unregister_reo_send_cmd_be() - Unregister Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_unregister_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = NULL;
}

/**
 * hal_register_reo_send_cmd_be() - Register Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_register_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
}

/**
 * hal_reset_rx_reo_tid_q_be() - reset the reo tid queue.
 * @hal_soc: HAL soc handle
 * @hw_qdesc_vaddr: start address of the tid queue
 * @size: size of address pointed by hw_qdesc_vaddr
 *
 * Return: None
 */
static void
hal_reset_rx_reo_tid_q_be(struct hal_soc *hal_soc, void *hw_qdesc_vaddr,
			  uint32_t size)
{
	struct rx_reo_queue *hw_qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr;
	int i;

	if (!hw_qdesc)
		return;

	hw_qdesc->svld = 0;
	hw_qdesc->ssn = 0;
	hw_qdesc->current_index = 0;
	hw_qdesc->pn_valid = 0;
	hw_qdesc->pn_31_0 = 0;
	hw_qdesc->pn_63_32 = 0;
	hw_qdesc->pn_95_64 = 0;
	hw_qdesc->pn_127_96 = 0;
	hw_qdesc->last_rx_enqueue_timestamp = 0;
	hw_qdesc->last_rx_dequeue_timestamp = 0;
	hw_qdesc->ptr_to_next_aging_queue_39_32 = 0;
	hw_qdesc->ptr_to_next_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_39_32 = 0;
	hw_qdesc->rx_bitmap_31_0 = 0;
	hw_qdesc->rx_bitmap_63_32 = 0;
	hw_qdesc->rx_bitmap_95_64 = 0;
	hw_qdesc->rx_bitmap_127_96 = 0;
	hw_qdesc->rx_bitmap_159_128 = 0;
	hw_qdesc->rx_bitmap_191_160 = 0;
	hw_qdesc->rx_bitmap_223_192 = 0;
	hw_qdesc->rx_bitmap_255_224 = 0;
	hw_qdesc->rx_bitmap_287_256 = 0;
	hw_qdesc->current_msdu_count = 0;
	hw_qdesc->current_mpdu_count = 0;
	hw_qdesc->last_sn_reg_index = 0;

	if (size > sizeof(struct rx_reo_queue)) {
		struct rx_reo_queue_ext *ext_desc;
		struct rx_reo_queue_1k *kdesc;

		i = ((size - sizeof(struct rx_reo_queue)) /
				sizeof(struct rx_reo_queue_ext));

		if (i > 10) {
			i = 10;
			kdesc = (struct rx_reo_queue_1k *)
				(hw_qdesc_vaddr + sizeof(struct rx_reo_queue) +
				 (10 * sizeof(struct rx_reo_queue_ext)));

			kdesc->rx_bitmap_319_288 = 0;
			kdesc->rx_bitmap_351_320 = 0;
			kdesc->rx_bitmap_383_352 = 0;
			kdesc->rx_bitmap_415_384 = 0;
			kdesc->rx_bitmap_447_416 = 0;
			kdesc->rx_bitmap_479_448 = 0;
			kdesc->rx_bitmap_511_480 = 0;
			kdesc->rx_bitmap_543_512 = 0;
			kdesc->rx_bitmap_575_544 = 0;
			kdesc->rx_bitmap_607_576 = 0;
			kdesc->rx_bitmap_639_608 = 0;
			kdesc->rx_bitmap_671_640 = 0;
			kdesc->rx_bitmap_703_672 = 0;
			kdesc->rx_bitmap_735_704 = 0;
			kdesc->rx_bitmap_767_736 = 0;
			kdesc->rx_bitmap_799_768 = 0;
			kdesc->rx_bitmap_831_800 = 0;
			kdesc->rx_bitmap_863_832 = 0;
			kdesc->rx_bitmap_895_864 = 0;
			kdesc->rx_bitmap_927_896 = 0;
			kdesc->rx_bitmap_959_928 = 0;
			kdesc->rx_bitmap_991_960 = 0;
			kdesc->rx_bitmap_1023_992 = 0;
		}

		ext_desc = (struct rx_reo_queue_ext *)
			(hw_qdesc_vaddr + (sizeof(struct rx_reo_queue)));

		while (i > 0) {
			qdf_mem_zero(&ext_desc->mpdu_link_pointer_0,
				     (15 * sizeof(struct rx_mpdu_link_ptr)));

			ext_desc++;
			i--;
		}
	}
}
#endif

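/**
 * hal_rx_get_phy_ppdu_id_size_be() - Get size of the PHY PPDU ID field
 *
 * Return: sizeof(uint64_t)
 */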
static inline uint8_t hal_rx_get_phy_ppdu_id_size_be(void)
{
	return sizeof(uint64_t);
}

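/**
 * hal_hw_txrx_default_ops_attach_be() - Attach generic BE HAL TX/RX ops
 * @hal_soc: HAL SoC context
 *
 * Populates the hal_soc ops table with the generic Beryllium
 * implementations of the TX/RX HAL operations.
 *
 * Return: none
 */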
void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
	hal_soc->ops->hal_get_reo_reg_base_offset =
					hal_get_reo_reg_base_offset_be;
	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;

	hal_soc->ops->hal_rx_ret_buf_manager_get =
						hal_rx_ret_buf_manager_get_be;
	hal_soc->ops->hal_rxdma_buff_addr_info_set =
					hal_rxdma_buff_addr_info_set_be;
	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
	hal_soc->ops->hal_gen_reo_remap_val =
				hal_gen_reo_remap_val_generic_be;
	hal_soc->ops->hal_tx_comp_get_buffer_source =
				hal_tx_comp_get_buffer_source_generic_be;
	hal_soc->ops->hal_tx_comp_get_release_reason =
				hal_tx_comp_get_release_reason_generic_be;
	hal_soc->ops->hal_get_wbm_internal_error =
					hal_get_wbm_internal_error_generic_be;
	hal_soc->ops->hal_rx_mpdu_desc_info_get =
				hal_rx_mpdu_desc_info_get_be;
	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
					hal_rx_wbm_rel_buf_paddr_get_be;

	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
						hal_rx_msdu_reo_dst_ind_get_be;
	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
					hal_rx_msdu_ext_desc_info_get_ptr_be;
	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	hal_soc->ops->hal_unregister_reo_send_cmd =
					hal_unregister_reo_send_cmd_be;
	hal_soc->ops->hal_register_reo_send_cmd = hal_register_reo_send_cmd_be;
	hal_soc->ops->hal_reset_rx_reo_tid_q = hal_reset_rx_reo_tid_q_be;
#endif
	hal_soc->ops->hal_rx_tlv_get_pn_num = hal_rx_tlv_get_pn_num_be;
#ifndef CONFIG_WORD_BASED_TLV
	hal_soc->ops->hal_rx_get_qdesc_addr = hal_rx_get_qdesc_addr_be;
#endif
	hal_soc->ops->hal_set_reo_ent_desc_reo_dest_ind =
					hal_set_reo_ent_desc_reo_dest_ind_be;
	hal_soc->ops->hal_get_reo_ent_desc_qdesc_addr =
					hal_get_reo_ent_desc_qdesc_addr_be;
	hal_soc->ops->hal_rx_en_mcast_fp_data_filter =
				hal_rx_en_mcast_fp_data_filter_generic_be;
	hal_soc->ops->hal_rx_get_phy_ppdu_id_size =
					hal_rx_get_phy_ppdu_id_size_be;
	hal_soc->ops->hal_rx_phy_legacy_get_rssi =
					hal_rx_phy_legacy_get_rssi_be;
	hal_soc->ops->hal_rx_parse_eht_sig_hdr = hal_rx_parse_eht_sig_hdr_be;
}