1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_module.h>
21 #include "hal_be_api.h"
22 #include "hal_be_hw_headers.h"
23 #include "hal_be_reo.h"
24 #include "hal_tx.h"	//HAL_SET_FLD
25 #include "hal_be_rx.h"	//HAL_RX_BUF_RBM_GET
26 
27 /*
28  * The 4-bit REO destination ring value is defined as: 0: TCL,
29  * 1: SW1, 2: SW2, 3: SW3, 4: SW4, 5: Release, 6: FW(WIFI), 7: SW5,
30  * 8: SW6, 9: SW7, 10: SW8, 11: NOT_USED.
31  *
32  */
33 uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
34 				  REO_REMAP_SW3, REO_REMAP_SW4,
35 				  REO_REMAP_SW5, REO_REMAP_SW6,
36 				  REO_REMAP_SW7, REO_REMAP_SW8};
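
/*
 * Illustrative sketch (not part of the driver): the table above is indexed
 * by the zero-based REO2SW ring id, so, for example, ring id 2 resolves to
 * REO_REMAP_SW3 via hal_reo_ring_remap_value_get_be() (defined later in
 * this file), and any id >= HAL_MAX_REO2SW_RINGS falls back to
 * REO_REMAP_RELEASE:
 *
 *	uint8_t remap = hal_reo_ring_remap_value_get_be(2);
 *	// remap == REO_REMAP_SW3
 */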
37 
38 #if defined(QDF_BIG_ENDIAN_MACHINE)
39 void hal_setup_reo_swap(struct hal_soc *soc)
40 {
41 	uint32_t reg_val;
42 
43 	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
44 		REO_REG_REG_BASE));
45 
46 	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
47 	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);
48 
49 	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
50 		REO_REG_REG_BASE), reg_val);
51 }
52 #else
53 void hal_setup_reo_swap(struct hal_soc *soc)
54 {
55 }
56 #endif
57 
58 /**
59  * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
60  * @hal_soc_hdl: Handle to HAL SoC structure
61  * @hal_ring_hdl: Handle to HAL SRNG structure
62  *
63  * Return: none
64  */
65 static void
66 hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
67 			 hal_ring_handle_t hal_ring_hdl)
68 {
69 }
70 
71 void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams)
72 {
73 	uint32_t reg_val;
74 	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;
75 
76 	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
77 		REO_REG_REG_BASE));
78 
79 	hal_reo_config(soc, reg_val, reo_params);
80 	/* Other ring enable bits and REO_ENABLE will be set by FW */
81 
82 	/* TODO: Setup destination ring mapping if enabled */
83 
84 	/* TODO: Error destination ring setting is left to default.
85 	 * Default setting is to send all errors to release ring.
86 	 */
87 
88 	/* Set the REO descriptor swap bits in case of a big-endian platform */
89 	hal_setup_reo_swap(soc);
90 
91 	HAL_REG_WRITE(soc,
92 		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
93 		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);
94 
95 	HAL_REG_WRITE(soc,
96 		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
97 		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));
98 
99 	HAL_REG_WRITE(soc,
100 		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
101 		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));
102 
103 	HAL_REG_WRITE(soc,
104 		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
105 		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));
106 
107 	/*
108 	 * When hash-based routing is enabled, the rx packet is routed
109 	 * based on the value 1 _ _ _ _, where the last 4
110 	 * bits come from hash[3:0]. This means the possible values
111 	 * are 0x10 to 0x1f. This value is used to look up the
112 	 * ring ID configured in the Destination_Ring_Ctrl_IX_* registers.
113 	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
114 	 * registers need to be configured to set up the 16 entries that
115 	 * map the hash values to a ring number. There are 3 bits per
116 	 * hash entry, which are mapped as follows:
117 	 * 0: TCL, 1: SW1, 2: SW2, 3: SW3, 4: SW4, 5: Release, 6: FW(WIFI),
118 	 * 7: NOT_USED.
119 	 */
120 	if (reo_params->rx_hash_enabled) {
121 		HAL_REG_WRITE(soc,
122 			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
123 			REO_REG_REG_BASE),
124 			reo_params->remap1);
125 
126 		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
127 			  HAL_REG_READ(soc,
128 				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
129 				       REO_REG_REG_BASE)));
130 
131 		HAL_REG_WRITE(soc,
132 			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
133 			REO_REG_REG_BASE),
134 			reo_params->remap2);
135 
136 		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
137 			  HAL_REG_READ(soc,
138 				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
139 				       REO_REG_REG_BASE)));
140 	}
141 
142 	/* TODO: Check if the following registers should be set up by the host:
143 	 * AGING_CONTROL
144 	 * HIGH_MEMORY_THRESHOLD
145 	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
146 	 * GLOBAL_LINK_DESC_COUNT_CTRL
147 	 */
148 }
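
/*
 * Illustrative sketch (not part of the driver): assuming the caller fills
 * reo_params->remap1 with the packed hash->ring entries described in the
 * routing comment above, the value for DESTINATION_RING_CTRL_IX_2 can be
 * generated with the hal_gen_reo_remap_val op (implemented below by
 * hal_gen_reo_remap_val_generic_be), e.g. spreading the hash values across
 * SW1..SW4:
 *
 *	uint8_t map[8] = { REO_REMAP_SW1, REO_REMAP_SW2,
 *			   REO_REMAP_SW3, REO_REMAP_SW4,
 *			   REO_REMAP_SW1, REO_REMAP_SW2,
 *			   REO_REMAP_SW3, REO_REMAP_SW4 };
 *	reo_params->remap1 =
 *		hal_gen_reo_remap_val_generic_be(HAL_REO_REMAP_REG_IX2, map);
 */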
149 
150 void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
151 			       qdf_dma_addr_t link_desc_paddr,
152 			       uint8_t bm_id)
153 {
154 	uint32_t *buf_addr = (uint32_t *)desc;
155 
156 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
157 			   link_desc_paddr & 0xffffffff);
158 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
159 			   (uint64_t)link_desc_paddr >> 32);
160 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
161 			   bm_id);
162 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
163 			   cookie);
164 }
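
/*
 * Illustrative sketch (not part of the driver): a 40-bit link descriptor
 * physical address such as 0x12_3456_7890 is split across the two
 * BUFFER_ADDR_INFO fields programmed above:
 *
 *	BUFFER_ADDR_31_0  = 0x34567890  (link_desc_paddr & 0xffffffff)
 *	BUFFER_ADDR_39_32 = 0x12        ((uint64_t)link_desc_paddr >> 32)
 */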
165 
166 static uint16_t hal_get_rx_max_ba_window_be(int tid)
167 {
168 	return  HAL_RX_BA_WINDOW_256;
169 }
170 
171 static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
172 {
173 	/* Hardcode ba_window_size to the maximum BA window for all TIDs
174 	 * other than HAL_NON_QOS_TID until HW issues are resolved.
175 	 */
176 	if (tid != HAL_NON_QOS_TID)
177 		ba_window_size = hal_get_rx_max_ba_window_be(tid);
178 
179 	/* Return the descriptor size corresponding to a window size of 2,
180 	 * since we set ba_window_size to 2 while setting up REO descriptors
181 	 * as a WAR to get a 2k-jump exception when aggregates are received
182 	 * without a BA session.
183 	 */
184 	if (ba_window_size <= 1) {
185 		if (tid != HAL_NON_QOS_TID)
186 			return sizeof(struct rx_reo_queue) +
187 				sizeof(struct rx_reo_queue_ext);
188 		else
189 			return sizeof(struct rx_reo_queue);
190 	}
191 
192 	if (ba_window_size <= 105)
193 		return sizeof(struct rx_reo_queue) +
194 			sizeof(struct rx_reo_queue_ext);
195 
196 	if (ba_window_size <= 210)
197 		return sizeof(struct rx_reo_queue) +
198 			(2 * sizeof(struct rx_reo_queue_ext));
199 
200 	return sizeof(struct rx_reo_queue) +
201 		(3 * sizeof(struct rx_reo_queue_ext));
202 }
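
/*
 * Illustrative sketch (not part of the driver), assuming HAL_RX_BA_WINDOW_256
 * evaluates to 256: because of the override above, any QoS TID is sized for
 * a 256-entry window, while the non-QoS TID with a window of 1 needs only
 * the base queue structure:
 *
 *	hal_get_reo_qdesc_size_be(64, 0)
 *		== sizeof(struct rx_reo_queue) +
 *		   3 * sizeof(struct rx_reo_queue_ext)
 *	hal_get_reo_qdesc_size_be(1, HAL_NON_QOS_TID)
 *		== sizeof(struct rx_reo_queue)
 */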
203 
204 void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
205 {
206 	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
207 }
208 
209 #if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
210 static inline uint32_t
211 hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
212 {
213 	uint32_t buf_src;
214 
215 	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
216 	switch (buf_src) {
217 	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
218 		return HAL_RX_WBM_ERR_SRC_RXDMA;
219 	case HAL_BE_RX_WBM_ERR_SRC_REO:
220 		return HAL_RX_WBM_ERR_SRC_REO;
221 	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
222 		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
223 			qdf_assert_always(0);
224 		return HAL_RX_WBM_ERR_SRC_FW;
225 	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
226 		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
227 			qdf_assert_always(0);
228 		return HAL_RX_WBM_ERR_SRC_SW;
229 	case HAL_BE_RX_WBM_ERR_SRC_TQM:
230 		return HAL_RX_WBM_ERR_SRC_TQM;
231 	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
232 		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
233 			qdf_assert_always(0);
234 		return HAL_RX_WBM_ERR_SRC_FW;
235 	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
236 		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
237 			qdf_assert_always(0);
238 		return HAL_RX_WBM_ERR_SRC_SW;
239 	default:
240 		qdf_assert_always(0);
241 	}
242 
243 	return buf_src;
244 }
245 #else
246 static inline uint32_t
247 hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
248 {
249 	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
250 }
251 #endif
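
/*
 * Illustrative sketch (not part of the driver): in the QCA_WIFI_KIWI
 * (non-V2) build above, the raw WBM source encodes the direction (e.g.
 * HAL_BE_RX_WBM_ERR_SRC_FW_RX vs HAL_BE_RX_WBM_ERR_SRC_FW_TX) while the
 * generic code expects a single HAL_RX_WBM_ERR_SRC_FW, so callers pass
 * the ring direction explicitly, as the tx/rx wrappers below do:
 *
 *	src = hal_wbm2sw_release_source_get(hal_desc,
 *					    HAL_BE_WBM_RELEASE_DIR_TX);
 */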
252 
253 uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
254 {
255 	return hal_wbm2sw_release_source_get(hal_desc,
256 					     HAL_BE_WBM_RELEASE_DIR_TX);
257 }
258 
259 /**
260  * hal_tx_comp_get_release_reason_generic_be() - TQM Release reason
261  * @hal_desc: completion ring descriptor pointer
262  *
263  * This function returns the reason why TQM released the descriptor.
264  *
265  * Return: TQM release reason
266  */
267 static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
268 {
269 	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
270 			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);
271 
272 	return (comp_desc &
273 		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
274 		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
275 }
276 
277 /**
278  * hal_get_wbm_internal_error_generic_be() - is WBM internal error
279  * @hal_desc: completion ring descriptor pointer
280  *
281  * This function returns whether a WBM internal error was indicated.
282  *
283  * Return: 1 if a WBM internal error occurred, 0 otherwise
284  */
285 static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
286 {
287 	/*
288 	 * TODO - This function is called by both tx completion and the wbm
289 	 * error handler; check if one needs WBM2SW-TX and the other WBM2SW-RX
290 	 */
291 	uint32_t comp_desc =
292 		*(uint32_t *)(((uint8_t *)hal_desc) +
293 			      HAL_WBM_INTERNAL_ERROR_OFFSET);
294 
295 	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
296 		HAL_WBM_INTERNAL_ERROR_LSB;
297 }
298 
299 /**
300  * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
301  * @ring_desc: ring descriptor
302  *
303  * Return: wbm error source
304  */
305 static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
306 {
307 	return hal_wbm2sw_release_source_get(ring_desc,
308 					     HAL_BE_WBM_RELEASE_DIR_RX);
309 }
310 
311 /**
312  * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
313  * @ring_desc: ring descriptor
314  *
315  * Return: rbm
316  */
317 uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
318 {
319 	/*
320 	 * The following macro takes buf_addr_info as its argument;
321 	 * since buf_addr_info is the first field in ring_desc,
322 	 * the call below is OK.
323 	 */
324 	return HAL_RX_BUF_RBM_GET(ring_desc);
325 }
326 
327 #define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
328 		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
329 		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
330 		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)
331 
332 #define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
333 		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
334 		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
335 		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)
336 
337 #define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc)	\
338 	(((*(((uint32_t *)wbm_desc) +			\
339 	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
340 	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >>	\
341 	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)
342 
343 #define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc)	\
344 	(((*(((uint32_t *)wbm_desc) +			\
345 	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
346 	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >>	\
347 	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)
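
/*
 * Illustrative sketch (not part of the driver): each GET macro above turns
 * the field's byte OFFSET into a 32-bit word index (OFFSET >> 2), masks the
 * field within that word and shifts it down to bit 0. For a hypothetical
 * field at byte offset 8 with mask 0x00000300 and LSB 8:
 *
 *	val = (((uint32_t *)wbm_desc)[8 >> 2] & 0x00000300) >> 8;
 */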
348 
349 /**
350  * hal_rx_wbm_err_info_get_generic_be(): Retrieve the WBM error code and reason
351  *	and save them to the hal_wbm_err_desc_info structure passed by the caller
352  * @wbm_desc: wbm ring descriptor
353  * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter.
354  * Return: void
355  */
356 void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
357 {
358 	struct hal_wbm_err_desc_info *wbm_er_info =
359 		(struct hal_wbm_err_desc_info *)wbm_er_info1;
360 
361 	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
362 	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
363 	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
364 	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
365 	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
366 }
367 
368 static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
369 					struct hal_buf_info *buf_info)
370 {
371 	struct reo_destination_ring *reo_ring =
372 		 (struct reo_destination_ring *)rx_desc;
373 
374 	buf_info->paddr =
375 	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
376 	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
377 	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
378 }
379 
380 static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
381 					 void *src_srng_desc,
382 					 hal_buff_addrinfo_t buf_addr_info,
383 					 uint8_t bm_action)
384 {
385 	/*
386 	 * The offsets of the fields used in this function are the same in
387 	 * wbm_release_ring for Lithium and wbm_release_ring_tx
388 	 * for Beryllium; hence we can use wbm_release_ring directly.
389 	 */
390 	struct wbm_release_ring *wbm_rel_srng =
391 			(struct wbm_release_ring *)src_srng_desc;
392 	uint32_t addr_31_0;
393 	uint8_t addr_39_32;
394 
395 	/* Structure copy !!! */
396 	wbm_rel_srng->released_buff_or_desc_addr_info =
397 				*((struct buffer_addr_info *)buf_addr_info);
398 
399 	addr_31_0 =
400 	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
401 	addr_39_32 =
402 	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;
403 
404 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
405 			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
406 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
407 			   bm_action);
408 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
409 			   BUFFER_OR_DESC_TYPE,
410 			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);
411 
412 	/* WBM indicates an error when any of the link descriptors given to
413 	 * WBM has a NULL address. One of those paths is link descriptors
414 	 * released from the host after processing RXDMA errors,
415 	 * or from the Rx defrag path, so assert here to ensure the
416 	 * host is not releasing descriptors with a NULL address.
417 	 */
418 
419 	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
420 		hal_dump_wbm_rel_desc(src_srng_desc);
421 		qdf_assert_always(0);
422 	}
423 }
424 
425 /**
426  * hal_rx_buf_cookie_rbm_get_be: Gets the SW cookie and return buffer
427  * manager from the buffer_addr_info of a ring descriptor
428  *
429  * @buf_addr_info_hdl: pointer to the buffer_addr_info of the current
430  * descriptor
431  * @buf_info_hdl: structure used to return the buffer information
432  *
433  * Return: void
434  */
435 static
436 void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
437 				  hal_buf_info_t buf_info_hdl)
438 {
439 	struct hal_buf_info *buf_info =
440 		(struct hal_buf_info *)buf_info_hdl;
441 	struct buffer_addr_info *buf_addr_info =
442 		(struct buffer_addr_info *)buf_addr_info_hdl;
443 
444 	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
445 	/*
446 	 * buffer addr info is the first member of ring desc, so the typecast
447 	 * can be done.
448 	 */
449 	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
450 						(hal_ring_desc_t)buf_addr_info);
451 }
452 
453 /*
454  * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
455  *				    rxdma ring entry.
456  * @rxdma_entry: descriptor entry
457  * @paddr: physical address of nbuf data pointer.
458  * @cookie: SW cookie used as an index to the SW rx desc.
459  * @manager: who owns the nbuf (host, NSS, etc.).
460  *
461  */
462 static inline void
463 hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
464 				qdf_dma_addr_t paddr, uint32_t cookie,
465 				uint8_t manager)
466 {
467 	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
468 	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;
469 
470 	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
471 	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
472 	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
473 	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
474 }
475 
476 /**
477  * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
478  * @rx_desc: rx descriptor
479  *
480  * Return: REO error code
481  */
482 static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
483 {
484 	struct reo_destination_ring *reo_desc =
485 			(struct reo_destination_ring *)rx_desc;
486 
487 	return HAL_RX_REO_ERROR_GET(reo_desc);
488 }
489 
490 /**
491  * hal_gen_reo_remap_val_generic_be() - Generate the REO remap register value
492  * @remap_reg: remap register (IX0 or IX2) to generate the value for
493  * @ix0_map: mapping values for REO
494  * Return: REO remap register value to be written
495  */
496 static uint32_t
497 hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
498 				 uint8_t *ix0_map)
499 {
500 	uint32_t ix_val = 0;
501 
502 	switch (remap_reg) {
503 	case HAL_REO_REMAP_REG_IX0:
504 		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
505 			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
506 			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
507 			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
508 			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
509 			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
510 			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
511 			HAL_REO_REMAP_IX0(ix0_map[7], 7);
512 		break;
513 	case HAL_REO_REMAP_REG_IX2:
514 		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
515 			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
516 			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
517 			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
518 			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
519 			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
520 			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
521 			HAL_REO_REMAP_IX2(ix0_map[7], 23);
522 		break;
523 	default:
524 		break;
525 	}
526 
527 	return ix_val;
528 }
529 
530 static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
531 {
532 	return HAL_RX_ERROR_STATUS_GET(rx_desc);
533 }
534 
535 static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
536 					   hal_ring_desc_t reo_desc,
537 					   void *st_handle,
538 					   uint32_t tlv, int *num_ref)
539 {
540 	union hal_reo_status *reo_status_ref;
541 
542 	reo_status_ref = (union hal_reo_status *)st_handle;
543 
544 	switch (tlv) {
545 	case HAL_REO_QUEUE_STATS_STATUS_TLV:
546 		hal_reo_queue_stats_status_be(reo_desc,
547 					      &reo_status_ref->queue_status,
548 					      hal_soc_hdl);
549 		*num_ref = reo_status_ref->queue_status.header.cmd_num;
550 		break;
551 	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
552 		hal_reo_flush_queue_status_be(reo_desc,
553 					      &reo_status_ref->fl_queue_status,
554 					      hal_soc_hdl);
555 		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
556 		break;
557 	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
558 		hal_reo_flush_cache_status_be(reo_desc,
559 					      &reo_status_ref->fl_cache_status,
560 					      hal_soc_hdl);
561 		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
562 		break;
563 	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
564 		hal_reo_unblock_cache_status_be
565 			(reo_desc, hal_soc_hdl,
566 			 &reo_status_ref->unblk_cache_status);
567 		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
568 		break;
569 	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
570 		hal_reo_flush_timeout_list_status_be(
571 					reo_desc,
572 					&reo_status_ref->fl_timeout_status,
573 					hal_soc_hdl);
574 		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
575 		break;
576 	case HAL_REO_DESC_THRES_STATUS_TLV:
577 		hal_reo_desc_thres_reached_status_be(
578 						reo_desc,
579 						&reo_status_ref->thres_status,
580 						hal_soc_hdl);
581 		*num_ref = reo_status_ref->thres_status.header.cmd_num;
582 		break;
583 	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
584 		hal_reo_rx_update_queue_status_be(
585 					reo_desc,
586 					&reo_status_ref->rx_queue_status,
587 					hal_soc_hdl);
588 		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
589 		break;
590 	default:
591 		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
592 			  "hal_soc %pK: no handler for TLV:%d",
593 			   hal_soc_hdl, tlv);
594 		return QDF_STATUS_E_FAILURE;
595 	} /* switch */
596 
597 	return QDF_STATUS_SUCCESS;
598 }
599 
600 static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
601 {
602 	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
603 }
604 
605 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
606 #define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
607 #endif
608 void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
609 				      struct hal_hw_cc_config *cc_cfg)
610 {
611 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
612 
613 	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
614 }
615 qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);
616 
617 static inline void
618 hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
619 			  void *msdu_desc, uint32_t dst_ind,
620 			  uint32_t nbuf_len)
621 {
622 	struct rx_msdu_desc_info *msdu_desc_info =
623 		(struct rx_msdu_desc_info *)msdu_desc;
624 
625 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
626 				  FIRST_MSDU_IN_MPDU_FLAG, 1);
627 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
628 				  LAST_MSDU_IN_MPDU_FLAG, 1);
629 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
630 				  MSDU_CONTINUATION, 0x0);
631 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
632 				  MSDU_LENGTH, nbuf_len);
633 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
634 				  SA_IS_VALID, 1);
635 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
636 				  DA_IS_VALID, 1);
637 }
638 
639 static inline void
640 hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
641 			  void *mpdu_desc, uint32_t seq_no)
642 {
643 	struct rx_mpdu_desc_info *mpdu_desc_info =
644 			(struct rx_mpdu_desc_info *)mpdu_desc;
645 
646 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
647 				  MSDU_COUNT, 0x1);
648 	/* unset frag bit */
649 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
650 				  FRAGMENT_FLAG, 0x0);
651 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
652 				  RAW_MPDU, 0x0);
653 }
654 
655 /**
656  * hal_rx_msdu_reo_dst_ind_get_be: Gets the REO
657  * destination ring ID from the msdu desc info
658  *
659  * @hal_soc_hdl: HAL SoC handle
660  * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
661  * the current descriptor
662  * Return: dst_ind (REO destination ring ID)
663  */
664 static inline
665 uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
666 					void *msdu_link_desc)
667 {
668 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
669 	struct rx_msdu_details *msdu_details;
670 	struct rx_msdu_desc_info *msdu_desc_info;
671 	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
672 	uint32_t dst_ind;
673 
674 	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);
675 
676 	/* The first msdu in the link should exist */
677 	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
678 							   hal_soc);
679 	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
680 	return dst_ind;
681 }
682 
683 uint32_t
684 hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
685 			      uint8_t rx_ring_mask)
686 {
687 	uint32_t num_rings = 0;
688 	uint32_t i = 0;
689 	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
690 	uint32_t reo_remap_val = 0;
691 	uint32_t ring_idx = 0;
692 	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};
693 
694 	/* create reo ring remap array */
695 	while (i < HAL_MAX_REO2SW_RINGS) {
696 		if (rx_ring_mask & (1 << i)) {
697 			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
698 			num_rings++;
699 		}
700 		i++;
701 	}
702 
703 	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
704 		if (rx_ring_mask) {
705 			ix_map[i] = ring_remap_arr[ring_idx];
706 			ring_idx = ((ring_idx + 1) % num_rings);
707 		} else {
708 			/* if ring mask is zero configure to release to WBM */
709 			ix_map[i] = REO_REMAP_RELEASE;
710 		}
711 	}
712 
713 	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
714 					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
715 					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
716 					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
717 					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
718 					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
719 					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
720 					  HAL_REO_REMAP_IX0(ix_map[7], 7);
721 
722 	return reo_remap_val;
723 }
724 
725 qdf_export_symbol(hal_reo_ix_remap_value_get_be);
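
/*
 * Illustrative sketch (not part of the driver): with rx_ring_mask = 0x5
 * (REO2SW1 and REO2SW3 enabled) the remap array above becomes
 * {REO_REMAP_SW1, REO_REMAP_SW3}, the eight IX-map slots are filled
 * round-robin as SW1, SW3, SW1, SW3, ... and the result is packed with
 * HAL_REO_REMAP_IX0():
 *
 *	uint32_t ix_val = hal_reo_ix_remap_value_get_be(hal_soc_hdl, 0x5);
 */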
726 
727 uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
728 {
729 	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
730 		return REO_REMAP_RELEASE;
731 
732 	return reo_dest_ring_remap[rx_ring_id];
733 }
734 
735 qdf_export_symbol(hal_reo_ring_remap_value_get_be);
736 
737 uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
738 {
739 	return (WBM_IDLE_DESC_LIST + chip_id);
740 }
741 
742 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
743 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
744 static inline void
745 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
746 				struct hal_buf_info *buf_info)
747 {
748 	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
749 		buf_info->paddr =
750 			(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
751 			 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
752 	else
753 		buf_info->paddr =
754 			(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
755 			 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
756 }
757 #else
758 static inline void
759 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
760 				struct hal_buf_info *buf_info)
761 {
762 	buf_info->paddr =
763 		(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
764 		 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
765 }
766 #endif
767 #else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
768 static inline void
769 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
770 				struct hal_buf_info *buf_info)
771 {
772 	buf_info->paddr =
773 		(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
774 		 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
775 }
776 #endif
777 
778 /**
779  * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
780  *		beryllium chipsets.
781  * @hal_soc: HAL soc handle
782  *
783  * Return: None
784  */
785 void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
786 {
787 	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
788 	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
789 	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
790 	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
791 	hal_soc->ops->hal_get_reo_reg_base_offset =
792 					hal_get_reo_reg_base_offset_be;
793 	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
794 	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
795 	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
796 	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;
797 
798 	hal_soc->ops->hal_rx_ret_buf_manager_get =
799 						hal_rx_ret_buf_manager_get_be;
800 	hal_soc->ops->hal_rxdma_buff_addr_info_set =
801 					hal_rxdma_buff_addr_info_set_be;
802 	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
803 	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
804 	hal_soc->ops->hal_gen_reo_remap_val =
805 				hal_gen_reo_remap_val_generic_be;
806 	hal_soc->ops->hal_tx_comp_get_buffer_source =
807 				hal_tx_comp_get_buffer_source_generic_be;
808 	hal_soc->ops->hal_tx_comp_get_release_reason =
809 				hal_tx_comp_get_release_reason_generic_be;
810 	hal_soc->ops->hal_get_wbm_internal_error =
811 					hal_get_wbm_internal_error_generic_be;
812 	hal_soc->ops->hal_rx_mpdu_desc_info_get =
813 				hal_rx_mpdu_desc_info_get_be;
814 	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
815 	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
816 	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
817 	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
818 					hal_rx_wbm_rel_buf_paddr_get_be;
819 
820 	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
821 	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
822 	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
823 	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
824 	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
825 						hal_rx_msdu_reo_dst_ind_get_be;
826 	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
827 	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
828 					hal_rx_msdu_ext_desc_info_get_ptr_be;
829 	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
830 	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
831 }
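
/*
 * Illustrative sketch (not part of the driver): once the table above is
 * populated, generic HAL/DP code reaches these Beryllium implementations
 * only through the ops table, e.g.:
 *
 *	qdesc_size = hal_soc->ops->hal_get_reo_qdesc_size(ba_window_size,
 *							  tid);
 */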
832