xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/be/hal_be_generic_api.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_module.h>
#include "hal_be_api.h"
#include "hal_be_hw_headers.h"
#include "hal_be_reo.h"
#include "hal_tx.h"	//HAL_SET_FLD
#include "hal_be_rx.h"	//HAL_RX_BUF_RBM_GET
#include "rx_reo_queue_1k.h"

/*
 * The 4-bit REO destination ring value is defined as: 0:TCL
 * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
 * 8:SW6  9:SW7  10:SW8  11:NOT_USED.
 */
uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
				  REO_REMAP_SW3, REO_REMAP_SW4,
				  REO_REMAP_SW5, REO_REMAP_SW6,
				  REO_REMAP_SW7, REO_REMAP_SW8};
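
/*
 * Illustrative note (not part of the driver API): given the table above, a
 * caller that owns REO2SW ring index 2 would program REO_REMAP_SW3 into the
 * destination ring control registers, e.g.
 *
 *	uint8_t remap = hal_reo_ring_remap_value_get_be(2);
 *
 * where out-of-range ring ids fall back to REO_REMAP_RELEASE (see
 * hal_reo_ring_remap_value_get_be() further below).
 */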

#if defined(QDF_BIG_ENDIAN_MACHINE)
void hal_setup_reo_swap(struct hal_soc *soc)
{
	uint32_t reg_val;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE));

	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);

	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE), reg_val);
}
#else
void hal_setup_reo_swap(struct hal_soc *soc)
{
}
#endif

/**
 * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
 * @hal_soc_hdl: Handle to HAL SoC structure
 * @hal_ring_hdl: Handle to HAL SRNG structure
 *
 * Return: none
 */
static void
hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
}

void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams,
			      int qref_reset)
{
	uint32_t reg_val;
	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
		REO_REG_REG_BASE));

	hal_reo_config(soc, reg_val, reo_params);
	/* Other ring enable bits and REO_ENABLE will be set by FW */

	/* TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	/* Set the reo descriptor swap bits in case of BIG endian platform */
	hal_setup_reo_swap(soc);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash-based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look up the
	 * ring ID configured in the Destination_Ring_Ctrl_IX_* registers.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set up the 16 entries that
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry, which are mapped as follows:
	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7:NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap1);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
				       REO_REG_REG_BASE)));

		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap2);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
				       REO_REG_REG_BASE)));
	}

	/* TODO: Check if the following registers should be set up by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}
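
/*
 * Worked example for the hash-based routing comment above (a sketch, not
 * driver code): with rx_hash_enabled set, a packet whose hash[3:0] is 0x5
 * produces the routing value 0x15.  Values 0x10-0x17 index the entries
 * programmed into DESTINATION_RING_CTRL_IX_2 (reo_params->remap1) and
 * 0x18-0x1f index DESTINATION_RING_CTRL_IX_3 (reo_params->remap2), so hash
 * 0x5 selects the sixth entry of remap1.
 */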

void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
			       qdf_dma_addr_t link_desc_paddr,
			       uint8_t bm_id)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
			   bm_id);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
			   cookie);
}
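
/*
 * Example of the address split performed above (illustrative values only):
 * a link descriptor at physical address 0x12_3456_7890 is programmed as
 * BUFFER_ADDR_31_0 = 0x34567890 and BUFFER_ADDR_39_32 = 0x12, i.e. the low
 * 32 bits and the next 8 bits of the 40-bit DMA address respectively.
 */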

static uint16_t hal_get_rx_max_ba_window_be(int tid)
{
	return HAL_RX_BA_WINDOW_256;
}

static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
{
	/* Hardcode ba_window_size to the maximum BA window for all TIDs
	 * other than NON_QOS_TID until HW issues are resolved.
	 */
	if (tid != HAL_NON_QOS_TID)
		ba_window_size = hal_get_rx_max_ba_window_be(tid);

	/* Return the descriptor size corresponding to a window size of 2,
	 * since ba_window_size is set to 2 while setting up REO descriptors
	 * as a WAR to get the 2k jump exception when aggregates are received
	 * without a BA session.
	 */
	if (ba_window_size <= 1) {
		if (tid != HAL_NON_QOS_TID)
			return sizeof(struct rx_reo_queue) +
				sizeof(struct rx_reo_queue_ext);
		else
			return sizeof(struct rx_reo_queue);
	}

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
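
/*
 * Sizing example (illustrative): with the override above, any QoS TID is
 * treated as a 256-entry BA window and gets sizeof(struct rx_reo_queue) +
 * 3 * sizeof(struct rx_reo_queue_ext), while a NON_QOS_TID with a window
 * size of 1 gets just sizeof(struct rx_reo_queue).  Each rx_reo_queue_ext
 * extends the window by roughly 105 entries beyond the base descriptor.
 */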

void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
{
	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
}

#if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	uint32_t buf_src;

	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
	switch (buf_src) {
	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
		return HAL_RX_WBM_ERR_SRC_RXDMA;
	case HAL_BE_RX_WBM_ERR_SRC_REO:
		return HAL_RX_WBM_ERR_SRC_REO;
	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	case HAL_BE_RX_WBM_ERR_SRC_TQM:
		return HAL_RX_WBM_ERR_SRC_TQM;
	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	default:
		qdf_assert_always(0);
	}

	return buf_src;
}
#else
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
}
#endif

uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
{
	return hal_wbm2sw_release_source_get(hal_desc,
					     HAL_BE_WBM_RELEASE_DIR_TX);
}

/**
 * hal_tx_comp_get_release_reason_generic_be() - Get the TQM release reason
 * @hal_desc: completion ring descriptor pointer
 *
 * This function returns the reason for which TQM released the descriptor.
 *
 * Return: TQM release reason
 */
static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
{
	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);

	return (comp_desc &
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
}

/**
 * hal_get_wbm_internal_error_generic_be() - Check for a WBM internal error
 * @hal_desc: completion ring descriptor pointer
 *
 * This function returns 1 if the descriptor indicates a WBM internal error,
 * 0 otherwise.
 *
 * Return: uint8_t
 */
static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
{
	/*
	 * TODO - This function is called by both the tx completion and the
	 * wbm error handler. Check whether one needs to use WBM2SW-TX and
	 * the other WBM2SW-RX.
	 */
	uint32_t comp_desc =
		*(uint32_t *)(((uint8_t *)hal_desc) +
			      HAL_WBM_INTERNAL_ERROR_OFFSET);

	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
		HAL_WBM_INTERNAL_ERROR_LSB;
}

/**
 * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
 * @ring_desc: ring descriptor
 *
 * Return: wbm error source
 */
static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
{
	return hal_wbm2sw_release_source_get(ring_desc,
					     HAL_BE_WBM_RELEASE_DIR_RX);
}

/**
 * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
 * @ring_desc: ring descriptor
 *
 * Return: rbm
 */
uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as its argument, but since
	 * buf_addr_info is the first field in ring_desc, the call below is
	 * OK.
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}

#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)
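
/*
 * All four macros above follow the same extraction pattern, shown here only
 * as an illustration with hypothetical field parameters: index into the
 * descriptor by the field's dword offset (byte OFFSET >> 2), mask the field
 * bits, then shift right by the field's LSB.  For a hypothetical field at
 * byte offset 8 with LSB 4 this reduces to
 *
 *	value = (((uint32_t *)wbm_desc)[2] & MASK) >> 4;
 */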

/**
 * hal_rx_wbm_err_info_get_generic_be() - Retrieve the WBM error code and
 *	reason and save them to the hal_wbm_err_desc_info structure passed
 *	by the caller
 * @wbm_desc: wbm ring descriptor
 * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter
 *
 * Return: void
 */
void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
{
	struct hal_wbm_err_desc_info *wbm_er_info =
		(struct hal_wbm_err_desc_info *)wbm_er_info1;

	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
}

static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
					struct hal_buf_info *buf_info)
{
	struct reo_destination_ring *reo_ring =
		 (struct reo_destination_ring *)rx_desc;

	buf_info->paddr =
	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
}

static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
					 void *src_srng_desc,
					 hal_buff_addrinfo_t buf_addr_info,
					 uint8_t bm_action)
{
	/*
	 * The offsets of the fields used in this function are the same in
	 * wbm_release_ring for Lithium and wbm_release_ring_tx for
	 * Beryllium; hence wbm_release_ring can be used directly.
	 */
	struct wbm_release_ring *wbm_rel_srng =
			(struct wbm_release_ring *)src_srng_desc;
	uint32_t addr_31_0;
	uint8_t addr_39_32;

	/* Structure copy !!! */
	wbm_rel_srng->released_buff_or_desc_addr_info =
				*((struct buffer_addr_info *)buf_addr_info);

	addr_31_0 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
	addr_39_32 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;

	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
			   bm_action);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   BUFFER_OR_DESC_TYPE,
			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);

	/* A WBM error is indicated when any of the link descriptors given to
	 * WBM has a NULL address. One such path is the link descriptors
	 * released from the host after processing RXDMA errors or from the
	 * Rx defrag path, so assert here to ensure the host is not releasing
	 * descriptors with a NULL address.
	 */

	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
		hal_dump_wbm_rel_desc(src_srng_desc);
		qdf_assert_always(0);
	}
}

/**
 * hal_rx_buf_cookie_rbm_get_be() - Get the SW cookie and return buffer
 *	manager from the buffer address info
 * @buf_addr_info_hdl: pointer to the buffer address info of the current
 *	ring descriptor
 * @buf_info_hdl: structure to return the buffer information
 *
 * Return: void
 */
static
void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
				  hal_buf_info_t buf_info_hdl)
{
	struct hal_buf_info *buf_info =
		(struct hal_buf_info *)buf_info_hdl;
	struct buffer_addr_info *buf_addr_info =
		(struct buffer_addr_info *)buf_addr_info_hdl;

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
	/*
	 * The buffer addr info is the first member of the ring desc, so the
	 * typecast can be done.
	 */
	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
						(hal_ring_desc_t)buf_addr_info);
}

/*
 * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
 *				    rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as an index to the SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 */
static inline void
hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
				qdf_dma_addr_t paddr, uint32_t cookie,
				uint8_t manager)
{
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}

/**
 * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
 * @rx_desc: rx descriptor
 *
 * Return: REO error code
 */
static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
{
	struct reo_destination_ring *reo_desc =
			(struct reo_destination_ring *)rx_desc;

	return HAL_RX_REO_ERROR_GET(reo_desc);
}

/**
 * hal_gen_reo_remap_val_generic_be() - Generate the REO remap register value
 * @remap_reg: REO remap register for which the value is generated (IX0/IX2)
 * @ix0_map: mapping values for reo
 *
 * Return: IX0/IX2 reo remap register value to be written
 */
static uint32_t
hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
				 uint8_t *ix0_map)
{
	uint32_t ix_val = 0;

	switch (remap_reg) {
	case HAL_REO_REMAP_REG_IX0:
		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
			HAL_REO_REMAP_IX0(ix0_map[7], 7);
		break;
	case HAL_REO_REMAP_REG_IX2:
		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
			HAL_REO_REMAP_IX2(ix0_map[7], 23);
		break;
	default:
		break;
	}

	return ix_val;
}
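
/*
 * Usage sketch (illustrative only; a real caller goes through
 * hal_soc->ops->hal_gen_reo_remap_val): to spread rx traffic across SW1-SW4
 * twice over, a caller could build
 *
 *	uint8_t map[8] = { REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
 *			   REO_REMAP_SW4, REO_REMAP_SW1, REO_REMAP_SW2,
 *			   REO_REMAP_SW3, REO_REMAP_SW4 };
 *	uint32_t ix0 = hal_gen_reo_remap_val_generic_be(HAL_REO_REMAP_REG_IX0,
 *							map);
 *
 * and write the result to the IX_0 destination ring control register.
 */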

static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_ERROR_STATUS_GET(rx_desc);
}

static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_desc_t reo_desc,
					   void *st_handle,
					   uint32_t tlv, int *num_ref)
{
	union hal_reo_status *reo_status_ref;

	reo_status_ref = (union hal_reo_status *)st_handle;

	switch (tlv) {
	case HAL_REO_QUEUE_STATS_STATUS_TLV:
		hal_reo_queue_stats_status_be(reo_desc,
					      &reo_status_ref->queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
		hal_reo_flush_queue_status_be(reo_desc,
					      &reo_status_ref->fl_queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
		hal_reo_flush_cache_status_be(reo_desc,
					      &reo_status_ref->fl_cache_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
		break;
	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
		hal_reo_unblock_cache_status_be
			(reo_desc, hal_soc_hdl,
			 &reo_status_ref->unblk_cache_status);
		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
		break;
	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
		hal_reo_flush_timeout_list_status_be(
					reo_desc,
					&reo_status_ref->fl_timeout_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
		break;
	case HAL_REO_DESC_THRES_STATUS_TLV:
		hal_reo_desc_thres_reached_status_be(
						reo_desc,
						&reo_status_ref->thres_status,
						hal_soc_hdl);
		*num_ref = reo_status_ref->thres_status.header.cmd_num;
		break;
	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
		hal_reo_rx_update_queue_status_be(
					reo_desc,
					&reo_status_ref->rx_queue_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
			  "hal_soc %pK: no handler for TLV:%d",
			   hal_soc_hdl, tlv);
		return QDF_STATUS_E_FAILURE;
	} /* switch */

	return QDF_STATUS_SUCCESS;
}
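
/*
 * Usage sketch (illustrative only; a real caller goes through
 * hal_soc->ops->hal_reo_status_update): a REO status ring handler that has
 * identified the TLV type of a status descriptor passes a caller-owned
 * union hal_reo_status here, e.g.
 *
 *	union hal_reo_status reo_status;
 *	QDF_STATUS status;
 *	int cmd_num;
 *
 *	status = hal_reo_status_update_be(hal_soc_hdl, reo_desc, &reo_status,
 *					  HAL_REO_QUEUE_STATS_STATUS_TLV,
 *					  &cmd_num);
 *
 * On success reo_status.queue_status is populated and cmd_num holds the
 * command number of the originating REO command.
 */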

static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}

#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
				      struct hal_hw_cc_config *cc_cfg)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);

static inline void
hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *msdu_desc, uint32_t dst_ind,
			  uint32_t nbuf_len)
{
	struct rx_msdu_desc_info *msdu_desc_info =
		(struct rx_msdu_desc_info *)msdu_desc;

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_LENGTH, nbuf_len);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);
}

static inline void
hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *mpdu_desc, uint32_t seq_no)
{
	struct rx_mpdu_desc_info *mpdu_desc_info =
			(struct rx_mpdu_desc_info *)mpdu_desc;

	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	/* unset frag bit */
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  FRAGMENT_FLAG, 0x0);
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  RAW_MPDU, 0x0);
}

/**
 * hal_rx_msdu_reo_dst_ind_get_be() - Get the REO destination ring ID from
 *	the msdu desc info
 * @hal_soc_hdl: HAL SoC handle
 * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
 *	the current descriptor
 *
 * Return: dst_ind (REO destination ring ID)
 */
static inline
uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
					void *msdu_link_desc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct rx_msdu_details *msdu_details;
	struct rx_msdu_desc_info *msdu_desc_info;
	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
	uint32_t dst_ind;

	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);

	/* The first msdu in the link should exist */
	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
							   hal_soc);
	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
	return dst_ind;
}

uint32_t
hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
			      uint8_t rx_ring_mask)
{
	uint32_t num_rings = 0;
	uint32_t i = 0;
	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
	uint32_t reo_remap_val = 0;
	uint32_t ring_idx = 0;
	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};

	/* create reo ring remap array */
	while (i < HAL_MAX_REO2SW_RINGS) {
		if (rx_ring_mask & (1 << i)) {
			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
			num_rings++;
		}
		i++;
	}

	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
		if (rx_ring_mask) {
			ix_map[i] = ring_remap_arr[ring_idx];
			ring_idx = ((ring_idx + 1) % num_rings);
		} else {
			/* if ring mask is zero configure to release to WBM */
			ix_map[i] = REO_REMAP_RELEASE;
		}
	}

	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
					  HAL_REO_REMAP_IX0(ix_map[7], 7);

	return reo_remap_val;
}

qdf_export_symbol(hal_reo_ix_remap_value_get_be);
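
/*
 * Worked example (illustrative): for rx_ring_mask 0x5 (REO2SW rings 0 and 2)
 * the remap array above becomes { REO_REMAP_SW1, REO_REMAP_SW3 } and the
 * eight IX map slots are filled round-robin as SW1, SW3, SW1, SW3, and so
 * on.  A zero mask maps every slot to REO_REMAP_RELEASE so that frames are
 * released to WBM instead of a SW ring.
 */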

uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
{
	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
		return REO_REMAP_RELEASE;

	return reo_dest_ring_remap[rx_ring_id];
}

qdf_export_symbol(hal_reo_ring_remap_value_get_be);

uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
{
	return (WBM_IDLE_DESC_LIST + chip_id);
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
		buf_info->paddr =
			(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
	else
		buf_info->paddr =
			(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#else
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif
#else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hal_unregister_reo_send_cmd_be() - Unregister the REO send command callback
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_unregister_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = NULL;
}

/**
 * hal_register_reo_send_cmd_be() - Register the REO send command callback
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_register_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
}

/**
 * hal_reset_rx_reo_tid_q_be() - Reset the REO tid queue
 * @hal_soc: HAL soc handle
 * @hw_qdesc_vaddr: start address of the tid queue
 * @size: size of the memory pointed to by hw_qdesc_vaddr
 *
 * Return: None
 */
static void
hal_reset_rx_reo_tid_q_be(struct hal_soc *hal_soc, void *hw_qdesc_vaddr,
			  uint32_t size)
{
	struct rx_reo_queue *hw_qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr;
	int i;

	if (!hw_qdesc)
		return;

	hw_qdesc->svld = 0;
	hw_qdesc->ssn = 0;
	hw_qdesc->current_index = 0;
	hw_qdesc->pn_valid = 0;
	hw_qdesc->pn_31_0 = 0;
	hw_qdesc->pn_63_32 = 0;
	hw_qdesc->pn_95_64 = 0;
	hw_qdesc->pn_127_96 = 0;
	hw_qdesc->last_rx_enqueue_timestamp = 0;
	hw_qdesc->last_rx_dequeue_timestamp = 0;
	hw_qdesc->ptr_to_next_aging_queue_39_32 = 0;
	hw_qdesc->ptr_to_next_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_39_32 = 0;
	hw_qdesc->rx_bitmap_31_0 = 0;
	hw_qdesc->rx_bitmap_63_32 = 0;
	hw_qdesc->rx_bitmap_95_64 = 0;
	hw_qdesc->rx_bitmap_127_96 = 0;
	hw_qdesc->rx_bitmap_159_128 = 0;
	hw_qdesc->rx_bitmap_191_160 = 0;
	hw_qdesc->rx_bitmap_223_192 = 0;
	hw_qdesc->rx_bitmap_255_224 = 0;
	hw_qdesc->rx_bitmap_287_256 = 0;
	hw_qdesc->current_msdu_count = 0;
	hw_qdesc->current_mpdu_count = 0;
	hw_qdesc->last_sn_reg_index = 0;

	if (size > sizeof(struct rx_reo_queue)) {
		struct rx_reo_queue_ext *ext_desc;
		struct rx_reo_queue_1k *kdesc;

		i = ((size - sizeof(struct rx_reo_queue)) /
				sizeof(struct rx_reo_queue_ext));

		if (i > 10) {
			i = 10;
			kdesc = (struct rx_reo_queue_1k *)
				(hw_qdesc_vaddr + sizeof(struct rx_reo_queue) +
				 (10 * sizeof(struct rx_reo_queue_ext)));

			kdesc->rx_bitmap_319_288 = 0;
			kdesc->rx_bitmap_351_320 = 0;
			kdesc->rx_bitmap_383_352 = 0;
			kdesc->rx_bitmap_415_384 = 0;
			kdesc->rx_bitmap_447_416 = 0;
			kdesc->rx_bitmap_479_448 = 0;
			kdesc->rx_bitmap_511_480 = 0;
			kdesc->rx_bitmap_543_512 = 0;
			kdesc->rx_bitmap_575_544 = 0;
			kdesc->rx_bitmap_607_576 = 0;
			kdesc->rx_bitmap_639_608 = 0;
			kdesc->rx_bitmap_671_640 = 0;
			kdesc->rx_bitmap_703_672 = 0;
			kdesc->rx_bitmap_735_704 = 0;
			kdesc->rx_bitmap_767_736 = 0;
			kdesc->rx_bitmap_799_768 = 0;
			kdesc->rx_bitmap_831_800 = 0;
			kdesc->rx_bitmap_863_832 = 0;
			kdesc->rx_bitmap_895_864 = 0;
			kdesc->rx_bitmap_927_896 = 0;
			kdesc->rx_bitmap_959_928 = 0;
			kdesc->rx_bitmap_991_960 = 0;
			kdesc->rx_bitmap_1023_992 = 0;
		}

		ext_desc = (struct rx_reo_queue_ext *)
			(hw_qdesc_vaddr + (sizeof(struct rx_reo_queue)));

		while (i > 0) {
			qdf_mem_zero(&ext_desc->mpdu_link_pointer_0,
				     (15 * sizeof(struct rx_mpdu_link_ptr)));

			ext_desc++;
			i--;
		}
	}
}
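
/*
 * Layout note (illustrative): for a 1k BA window the queue descriptor is
 * laid out as one rx_reo_queue, then up to ten rx_reo_queue_ext blocks,
 * then one rx_reo_queue_1k, which is why the reset above clears at most ten
 * ext descriptors before zeroing the 1k bitmap block placed after them.
 */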
#endif

/**
 * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
 *		beryllium chipsets.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
910 {
911 	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
912 	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
913 	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
914 	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
915 	hal_soc->ops->hal_get_reo_reg_base_offset =
916 					hal_get_reo_reg_base_offset_be;
917 	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
918 	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
919 	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
920 	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;
921 
922 	hal_soc->ops->hal_rx_ret_buf_manager_get =
923 						hal_rx_ret_buf_manager_get_be;
924 	hal_soc->ops->hal_rxdma_buff_addr_info_set =
925 					hal_rxdma_buff_addr_info_set_be;
926 	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
927 	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
928 	hal_soc->ops->hal_gen_reo_remap_val =
929 				hal_gen_reo_remap_val_generic_be;
930 	hal_soc->ops->hal_tx_comp_get_buffer_source =
931 				hal_tx_comp_get_buffer_source_generic_be;
932 	hal_soc->ops->hal_tx_comp_get_release_reason =
933 				hal_tx_comp_get_release_reason_generic_be;
934 	hal_soc->ops->hal_get_wbm_internal_error =
935 					hal_get_wbm_internal_error_generic_be;
936 	hal_soc->ops->hal_rx_mpdu_desc_info_get =
937 				hal_rx_mpdu_desc_info_get_be;
938 	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
939 	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
940 	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
941 	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
942 					hal_rx_wbm_rel_buf_paddr_get_be;
943 
944 	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
945 	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
946 	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
947 	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
948 	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
949 						hal_rx_msdu_reo_dst_ind_get_be;
950 	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
951 	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
952 					hal_rx_msdu_ext_desc_info_get_ptr_be;
953 	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
954 	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
955 #ifdef DP_UMAC_HW_RESET_SUPPORT
956 	hal_soc->ops->hal_unregister_reo_send_cmd =
957 					hal_unregister_reo_send_cmd_be;
958 	hal_soc->ops->hal_register_reo_send_cmd = hal_register_reo_send_cmd_be;
959 	hal_soc->ops->hal_reset_rx_reo_tid_q = hal_reset_rx_reo_tid_q_be;
960 #endif
961 }
962