xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/be/hal_be_generic_api.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_module.h>
21 #include "hal_be_api.h"
22 #include "hal_be_hw_headers.h"
23 #include "hal_be_reo.h"
24 #include "hal_tx.h"	//HAL_SET_FLD
25 #include "hal_be_rx.h"	//HAL_RX_BUF_RBM_GET
26 #include "rx_reo_queue_1k.h"
27 #include "hal_be_rx_tlv.h"
28 
29 /*
30  * The 4-bit REO destination ring value is defined as: 0: TCL
31  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
32  * 8:SW6  9:SW7  10:SW8  11: NOT_USED.
33  *
34  */
35 uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
36 				  REO_REMAP_SW3, REO_REMAP_SW4,
37 				  REO_REMAP_SW5, REO_REMAP_SW6,
38 				  REO_REMAP_SW7, REO_REMAP_SW8};
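
/*
 * reo_dest_ring_remap[] above maps a 0-based REO2SW ring index to the remap
 * value programmed into the REO destination ring control (IX) registers; it
 * is used by hal_reo_ring_remap_value_get_be() and
 * hal_reo_ix_remap_value_get_be() below. For example, index 0 yields
 * REO_REMAP_SW1.
 */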
39 
40 #if defined(QDF_BIG_ENDIAN_MACHINE)
41 void hal_setup_reo_swap(struct hal_soc *soc)
42 {
43 	uint32_t reg_val;
44 
45 	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
46 		REO_REG_REG_BASE));
47 
48 	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
49 	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);
50 
51 	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
52 		REO_REG_REG_BASE), reg_val);
53 }
54 #else
55 void hal_setup_reo_swap(struct hal_soc *soc)
56 {
57 }
58 #endif
59 
60 /**
61  * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
62  * @hal_soc_hdl: Handle to HAL SoC structure
63  * @hal_ring_hdl: Handle to HAL SRNG structure
64  *
65  * Return: none
66  */
67 static void
68 hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
69 			 hal_ring_handle_t hal_ring_hdl)
70 {
71 }
72 
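/**
 * hal_reo_setup_generic_be() - Initialize the HW REO block
 * @soc: HAL SOC handle
 * @reoparams: parameters needed by HAL for REO config
 * @qref_reset: QREF reset flag (not used by this generic implementation)
 *
 * Return: none
 */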
73 void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams,
74 			      int qref_reset)
75 {
76 	uint32_t reg_val;
77 	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;
78 
79 	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
80 		REO_REG_REG_BASE));
81 
82 	hal_reo_config(soc, reg_val, reo_params);
83 	/* Other ring enable bits and REO_ENABLE will be set by FW */
84 
85 	/* TODO: Setup destination ring mapping if enabled */
86 
87 	/* TODO: Error destination ring setting is left to default.
88 	 * Default setting is to send all errors to release ring.
89 	 */
90 
91 	/* Set the reo descriptor swap bits in case of BIG endian platform */
92 	hal_setup_reo_swap(soc);
93 
94 	HAL_REG_WRITE(soc,
95 		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
96 		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);
97 
98 	HAL_REG_WRITE(soc,
99 		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
100 		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));
101 
102 	HAL_REG_WRITE(soc,
103 		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
104 		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));
105 
106 	HAL_REG_WRITE(soc,
107 		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
108 		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));
109 
110 	/*
111 	 * When hash based routing is enabled, routing of the rx packet
112 	 * is done based on the value 0b1xxxx, where the last 4
113 	 * bits are taken from hash[3:0]. This means the possible values
114 	 * are 0x10 to 0x1f. This value is used to look up the
115 	 * ring ID configured in the Destination_Ring_Ctrl_IX_* registers.
116 	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
117 	 * registers need to be configured to set up the 16 entries that
118 	 * map the hash values to a ring number. There are 3 bits per
119 	 * hash entry, which are mapped as follows:
120 	 * 0: TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
121 	 * 7: NOT_USED.
122 	 */
123 	if (reo_params->rx_hash_enabled) {
124 		HAL_REG_WRITE(soc,
125 			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
126 			REO_REG_REG_BASE),
127 			reo_params->remap1);
128 
129 		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
130 			  HAL_REG_READ(soc,
131 				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
132 				       REO_REG_REG_BASE)));
133 
134 		HAL_REG_WRITE(soc,
135 			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
136 			REO_REG_REG_BASE),
137 			reo_params->remap2);
138 
139 		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
140 			  HAL_REG_READ(soc,
141 				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
142 				       REO_REG_REG_BASE)));
143 	}
144 
145 	/* TODO: Check if the following registers should be set up by host:
146 	 * AGING_CONTROL
147 	 * HIGH_MEMORY_THRESHOLD
148 	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
149 	 * GLOBAL_LINK_DESC_COUNT_CTRL
150 	 */
151 }
152 
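/**
 * hal_set_link_desc_addr_be() - Set up the buffer_addr_info of a link
 *				 descriptor entry
 * @desc: descriptor entry to program
 * @cookie: SW cookie to set in the descriptor
 * @link_desc_paddr: physical address of the link descriptor
 * @bm_id: return buffer manager id to set
 *
 * Return: void
 */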
153 void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
154 			       qdf_dma_addr_t link_desc_paddr,
155 			       uint8_t bm_id)
156 {
157 	uint32_t *buf_addr = (uint32_t *)desc;
158 
159 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
160 			   link_desc_paddr & 0xffffffff);
161 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
162 			   (uint64_t)link_desc_paddr >> 32);
163 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
164 			   bm_id);
165 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
166 			   cookie);
167 }
168 
169 static uint16_t hal_get_rx_max_ba_window_be(int tid)
170 {
171 	return  HAL_RX_BA_WINDOW_256;
172 }
173 
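/**
 * hal_get_reo_qdesc_size_be() - Get the REO queue descriptor size for a
 *				 BA window size
 * @ba_window_size: BA window size
 * @tid: TID
 *
 * Return: queue descriptor size in bytes
 */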
174 static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
175 {
176 	/* Hardcode the ba_window_size to HAL_RX_BA_WINDOW_256 for all
177 	 * TIDs except NON_QOS_TID until HW issues are resolved.
178 	 */
179 	if (tid != HAL_NON_QOS_TID)
180 		ba_window_size = hal_get_rx_max_ba_window_be(tid);
181 
182 	/* Return the descriptor size corresponding to a window size of 2,
183 	 * since we set ba_window_size to 2 while setting up REO descriptors,
184 	 * as a WAR to get a 2k-jump exception when aggregates are received
185 	 * without a BA session.
186 	 */
187 	if (ba_window_size <= 1) {
188 		if (tid != HAL_NON_QOS_TID)
189 			return sizeof(struct rx_reo_queue) +
190 				sizeof(struct rx_reo_queue_ext);
191 		else
192 			return sizeof(struct rx_reo_queue);
193 	}
194 
195 	if (ba_window_size <= 105)
196 		return sizeof(struct rx_reo_queue) +
197 			sizeof(struct rx_reo_queue_ext);
198 
199 	if (ba_window_size <= 210)
200 		return sizeof(struct rx_reo_queue) +
201 			(2 * sizeof(struct rx_reo_queue_ext));
202 
203 	return sizeof(struct rx_reo_queue) +
204 		(3 * sizeof(struct rx_reo_queue_ext));
205 }
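
/*
 * Illustrative sizing based on the checks above: ba_window_size = 64 needs
 * one rx_reo_queue_ext, 128 needs two, and 256 (the value returned by
 * hal_get_rx_max_ba_window_be()) needs three, in addition to the base
 * struct rx_reo_queue.
 */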
206 
207 void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
208 {
209 	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
210 }
211 
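/*
 * Translate the WBM2SW release source field into the common
 * HAL_RX_WBM_ERR_SRC_* values. On KIWI v1 the HW encoding is direction
 * specific, so the value is remapped and an assert catches a Tx-only source
 * seen on the Rx path (or vice versa); on other targets the HW value is
 * returned as-is.
 */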
212 #if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
213 static inline uint32_t
214 hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
215 {
216 	uint32_t buf_src;
217 
218 	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
219 	switch (buf_src) {
220 	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
221 		return HAL_RX_WBM_ERR_SRC_RXDMA;
222 	case HAL_BE_RX_WBM_ERR_SRC_REO:
223 		return HAL_RX_WBM_ERR_SRC_REO;
224 	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
225 		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
226 			qdf_assert_always(0);
227 		return HAL_RX_WBM_ERR_SRC_FW;
228 	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
229 		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
230 			qdf_assert_always(0);
231 		return HAL_RX_WBM_ERR_SRC_SW;
232 	case HAL_BE_RX_WBM_ERR_SRC_TQM:
233 		return HAL_RX_WBM_ERR_SRC_TQM;
234 	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
235 		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
236 			qdf_assert_always(0);
237 		return HAL_RX_WBM_ERR_SRC_FW;
238 	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
239 		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
240 			qdf_assert_always(0);
241 		return HAL_RX_WBM_ERR_SRC_SW;
242 	default:
243 		qdf_assert_always(0);
244 	}
245 
246 	return buf_src;
247 }
248 #else
249 static inline uint32_t
250 hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
251 {
252 	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
253 }
254 #endif
255 
256 uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
257 {
258 	return hal_wbm2sw_release_source_get(hal_desc,
259 					     HAL_BE_WBM_RELEASE_DIR_TX);
260 }
261 
262 /**
263  * hal_tx_comp_get_release_reason_generic_be() - Get the TQM release reason
264  * @hal_desc: completion ring descriptor pointer
265  *
266  * This function returns the reason TQM released the Tx descriptor.
267  *
268  * Return: TQM release reason
269  */
270 static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
271 {
272 	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
273 			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);
274 
275 	return (comp_desc &
276 		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
277 		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
278 }
279 
280 /**
281  * hal_get_wbm_internal_error_generic_be() - Check for a WBM internal error
282  * @hal_desc: completion ring descriptor pointer
283  *
284  * This function checks whether the completion reports a WBM internal error.
285  *
286  * Return: 1 if a WBM internal error is indicated, 0 otherwise
287  */
288 static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
289 {
290 	/*
291 	 * TODO - This func is called by both the tx completion and wbm error
292 	 * handlers; check if one needs to use WBM2SW-TX and the other WBM2SW-RX
293 	 */
294 	uint32_t comp_desc =
295 		*(uint32_t *)(((uint8_t *)hal_desc) +
296 			      HAL_WBM_INTERNAL_ERROR_OFFSET);
297 
298 	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
299 		HAL_WBM_INTERNAL_ERROR_LSB;
300 }
301 
302 /**
303  * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
304  * @ring_desc: ring descriptor
305  *
306  * Return: wbm error source
307  */
308 static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
309 {
310 	return hal_wbm2sw_release_source_get(ring_desc,
311 					     HAL_BE_WBM_RELEASE_DIR_RX);
312 }
313 
314 /**
315  * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
316  * @ring_desc: ring descriptor
317  *
318  * Return: rbm
319  */
320 uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
321 {
322 	/*
323 	 * The following macro takes buf_addr_info as its argument,
324 	 * but since buf_addr_info is the first field in ring_desc,
325 	 * passing ring_desc directly is OK.
326 	 */
327 	return HAL_RX_BUF_RBM_GET(ring_desc);
328 }
329 
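/*
 * The helpers below extract the REO/RXDMA push reason and error code fields
 * from a WBM2SW completion ring descriptor: each reads the 32-bit word at the
 * field's byte offset and applies the corresponding mask and shift.
 */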
330 #define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
331 		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
332 		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
333 		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)
334 
335 #define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
336 		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
337 		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
338 		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)
339 
340 #define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc)	\
341 	(((*(((uint32_t *)wbm_desc) +			\
342 	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
343 	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >>	\
344 	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)
345 
346 #define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc)	\
347 	(((*(((uint32_t *)wbm_desc) +			\
348 	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
349 	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >>	\
350 	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)
351 
352 /**
353  * hal_rx_wbm_err_info_get_generic_be() - Retrieve the WBM error codes and
354  *	push reasons and save them to the caller's hal_wbm_err_desc_info
355  * @wbm_desc: wbm ring descriptor
356  * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter
357  * Return: void
358  */
359 void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
360 {
361 	struct hal_wbm_err_desc_info *wbm_er_info =
362 		(struct hal_wbm_err_desc_info *)wbm_er_info1;
363 
364 	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
365 	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
366 	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
367 	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
368 	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
369 }
370 
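/**
 * hal_rx_reo_buf_paddr_get_be() - Get the buffer physical address and SW
 *				   cookie from a REO destination ring descriptor
 * @rx_desc: REO destination ring descriptor
 * @buf_info: structure to return the buffer information
 */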
371 static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
372 					struct hal_buf_info *buf_info)
373 {
374 	struct reo_destination_ring *reo_ring =
375 		 (struct reo_destination_ring *)rx_desc;
376 
377 	buf_info->paddr =
378 	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
379 	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
380 	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
381 }
382 
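/**
 * hal_rx_msdu_link_desc_set_be() - Fill a WBM release ring entry to return an
 *				    MSDU link descriptor to WBM
 * @hal_soc_hdl: HAL SOC handle
 * @src_srng_desc: WBM release ring descriptor to fill
 * @buf_addr_info: buffer address info of the link descriptor being released
 * @bm_action: buffer manager action to program
 */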
383 static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
384 					 void *src_srng_desc,
385 					 hal_buff_addrinfo_t buf_addr_info,
386 					 uint8_t bm_action)
387 {
388 	/*
389 	 * The offsets of the fields used in this function are the same in
390 	 * wbm_release_ring for Lithium and wbm_release_ring_tx
391 	 * for Beryllium; hence we can use wbm_release_ring directly.
392 	 */
393 	struct wbm_release_ring *wbm_rel_srng =
394 			(struct wbm_release_ring *)src_srng_desc;
395 	uint32_t addr_31_0;
396 	uint8_t addr_39_32;
397 
398 	/* Structure copy !!! */
399 	wbm_rel_srng->released_buff_or_desc_addr_info =
400 				*((struct buffer_addr_info *)buf_addr_info);
401 
402 	addr_31_0 =
403 	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
404 	addr_39_32 =
405 	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;
406 
407 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
408 			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
409 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
410 			   bm_action);
411 	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
412 			   BUFFER_OR_DESC_TYPE,
413 			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);
414 
415 	/* A WBM error is indicated when any of the link descriptors given to
416 	 * WBM has a NULL address. One of those paths is the link descriptors
417 	 * released from the host after processing RXDMA errors or from the
418 	 * Rx defrag path, so assert here to ensure the host is not releasing
419 	 * descriptors with a NULL address.
420 	 */
421 
422 	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
423 		hal_dump_wbm_rel_desc(src_srng_desc);
424 		qdf_assert_always(0);
425 	}
426 }
427 
428 /**
429  * hal_rx_buf_cookie_rbm_get_be() - Gets the SW cookie and return buffer
430  * manager from a buffer_addr_info
431  *
432  * @buf_addr_info_hdl: pointer to the buffer_addr_info of the current
433  * descriptor
434  * @buf_info_hdl: structure to return the cookie and rbm
435  *
436  * Return: void
437  */
438 static
439 void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
440 				  hal_buf_info_t buf_info_hdl)
441 {
442 	struct hal_buf_info *buf_info =
443 		(struct hal_buf_info *)buf_info_hdl;
444 	struct buffer_addr_info *buf_addr_info =
445 		(struct buffer_addr_info *)buf_addr_info_hdl;
446 
447 	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
448 	/*
449 	 * buffer addr info is the first member of ring desc, so the typecast
450 	 * can be done.
451 	 */
452 	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
453 						(hal_ring_desc_t)buf_addr_info);
454 }
455 
456 /*
457  * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
458  *				    rxdma ring entry.
459  * @rxdma_entry: descriptor entry
460  * @paddr: physical address of nbuf data pointer.
461  * @cookie: SW cookie used as an index to the SW rx desc.
462  * @manager: who owns the nbuf (host, NSS, etc...).
463  *
464  */
465 static inline void
466 hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
467 				qdf_dma_addr_t paddr, uint32_t cookie,
468 				uint8_t manager)
469 {
470 	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
471 	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;
472 
473 	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
474 	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
475 	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
476 	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
477 }
478 
479 /**
480  * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
481  * @rx_desc: rx descriptor
482  *
483  * Return: REO error code
484  */
485 static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
486 {
487 	struct reo_destination_ring *reo_desc =
488 			(struct reo_destination_ring *)rx_desc;
489 
490 	return HAL_RX_REO_ERROR_GET(reo_desc);
491 }
492 
493 /**
494  * hal_gen_reo_remap_val_generic_be() - Generate the REO remap register value
495  * @remap_reg: REO remap register for which to generate the value (IX0/IX2)
496  * @ix0_map: mapping values for REO
497  * Return: REO remap register value to be written
498  */
499 static uint32_t
500 hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
501 				 uint8_t *ix0_map)
502 {
503 	uint32_t ix_val = 0;
504 
505 	switch (remap_reg) {
506 	case HAL_REO_REMAP_REG_IX0:
507 		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
508 			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
509 			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
510 			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
511 			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
512 			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
513 			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
514 			HAL_REO_REMAP_IX0(ix0_map[7], 7);
515 		break;
516 	case HAL_REO_REMAP_REG_IX2:
517 		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
518 			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
519 			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
520 			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
521 			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
522 			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
523 			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
524 			HAL_REO_REMAP_IX2(ix0_map[7], 23);
525 		break;
526 	default:
527 		break;
528 	}
529 
530 	return ix_val;
531 }
532 
533 static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
534 {
535 	return HAL_RX_ERROR_STATUS_GET(rx_desc);
536 }
537 
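/**
 * hal_reo_status_update_be() - Parse a REO status TLV into the caller's
 *				status structure
 * @hal_soc_hdl: HAL SOC handle
 * @reo_desc: REO status ring descriptor
 * @st_handle: hal_reo_status union to be filled
 * @tlv: TLV type present in the status descriptor
 * @num_ref: command number returned from the status header
 *
 * Return: QDF_STATUS_SUCCESS if the TLV is handled, QDF_STATUS_E_FAILURE
 *	   otherwise
 */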
538 static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
539 					   hal_ring_desc_t reo_desc,
540 					   void *st_handle,
541 					   uint32_t tlv, int *num_ref)
542 {
543 	union hal_reo_status *reo_status_ref;
544 
545 	reo_status_ref = (union hal_reo_status *)st_handle;
546 
547 	switch (tlv) {
548 	case HAL_REO_QUEUE_STATS_STATUS_TLV:
549 		hal_reo_queue_stats_status_be(reo_desc,
550 					      &reo_status_ref->queue_status,
551 					      hal_soc_hdl);
552 		*num_ref = reo_status_ref->queue_status.header.cmd_num;
553 		break;
554 	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
555 		hal_reo_flush_queue_status_be(reo_desc,
556 					      &reo_status_ref->fl_queue_status,
557 					      hal_soc_hdl);
558 		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
559 		break;
560 	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
561 		hal_reo_flush_cache_status_be(reo_desc,
562 					      &reo_status_ref->fl_cache_status,
563 					      hal_soc_hdl);
564 		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
565 		break;
566 	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
567 		hal_reo_unblock_cache_status_be
568 			(reo_desc, hal_soc_hdl,
569 			 &reo_status_ref->unblk_cache_status);
570 		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
571 		break;
572 	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
573 		hal_reo_flush_timeout_list_status_be(
574 					reo_desc,
575 					&reo_status_ref->fl_timeout_status,
576 					hal_soc_hdl);
577 		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
578 		break;
579 	case HAL_REO_DESC_THRES_STATUS_TLV:
580 		hal_reo_desc_thres_reached_status_be(
581 						reo_desc,
582 						&reo_status_ref->thres_status,
583 						hal_soc_hdl);
584 		*num_ref = reo_status_ref->thres_status.header.cmd_num;
585 		break;
586 	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
587 		hal_reo_rx_update_queue_status_be(
588 					reo_desc,
589 					&reo_status_ref->rx_queue_status,
590 					hal_soc_hdl);
591 		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
592 		break;
593 	default:
594 		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
595 			  "hal_soc %pK: no handler for TLV:%d",
596 			   hal_soc_hdl, tlv);
597 		return QDF_STATUS_E_FAILURE;
598 	} /* switch */
599 
600 	return QDF_STATUS_SUCCESS;
601 }
602 
603 static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
604 {
605 	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
606 }
607 
608 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
609 #define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
610 #endif
611 void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
612 				      struct hal_hw_cc_config *cc_cfg)
613 {
614 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
615 
616 	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
617 }
618 qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);
619 
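/**
 * hal_msdu_desc_info_set_be() - Populate the rx_msdu_desc_info (and the
 *				 following ext desc info) for a single MSDU
 * @hal_soc_hdl: HAL SOC handle
 * @msdu_desc: rx_msdu_desc_info to fill
 * @dst_ind: REO destination indication
 * @nbuf_len: MSDU length
 */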
620 static inline void
621 hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
622 			  void *msdu_desc, uint32_t dst_ind,
623 			  uint32_t nbuf_len)
624 {
625 	struct rx_msdu_desc_info *msdu_desc_info =
626 		(struct rx_msdu_desc_info *)msdu_desc;
627 	struct rx_msdu_ext_desc_info *msdu_ext_desc_info =
628 		(struct rx_msdu_ext_desc_info *)(msdu_desc_info + 1);
629 
630 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
631 				  FIRST_MSDU_IN_MPDU_FLAG, 1);
632 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
633 				  LAST_MSDU_IN_MPDU_FLAG, 1);
634 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
635 				  MSDU_CONTINUATION, 0x0);
636 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
637 				  MSDU_LENGTH, nbuf_len);
638 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
639 				  SA_IS_VALID, 1);
640 	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
641 				  DA_IS_VALID, 1);
642 	HAL_RX_MSDU_REO_DST_IND_SET(msdu_ext_desc_info,
643 				    REO_DESTINATION_INDICATION, dst_ind);
644 }
645 
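/**
 * hal_mpdu_desc_info_set_be() - Populate the rx_mpdu_desc_info and the MPDU
 *				 sequence number in the REO entrance descriptor
 * @hal_soc_hdl: HAL SOC handle
 * @ent_desc: REO entrance ring descriptor
 * @mpdu_desc: rx_mpdu_desc_info to fill (single MSDU, non-fragment, non-raw)
 * @seq_no: MPDU sequence number to program
 */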
646 static inline void
647 hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
648 			  void *ent_desc,
649 			  void *mpdu_desc,
650 			  uint32_t seq_no)
651 {
652 	struct rx_mpdu_desc_info *mpdu_desc_info =
653 			(struct rx_mpdu_desc_info *)mpdu_desc;
654 	uint8_t *desc = (uint8_t *)ent_desc;
655 
656 	HAL_RX_FLD_SET(desc, REO_ENTRANCE_RING,
657 		       MPDU_SEQUENCE_NUMBER, seq_no);
658 
659 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
660 				  MSDU_COUNT, 0x1);
661 	/* unset frag bit */
662 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
663 				  FRAGMENT_FLAG, 0x0);
664 	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
665 				  RAW_MPDU, 0x0);
666 }
667 
668 /**
669  * hal_rx_msdu_reo_dst_ind_get_be() - Gets the REO
670  * destination ring ID from the msdu desc info
671  * @hal_soc_hdl: HAL SOC handle
672  * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
673  * the current descriptor
674  *
675  * Return: dst_ind (REO destination ring ID)
676  */
677 static inline
678 uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
679 					void *msdu_link_desc)
680 {
681 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
682 	struct rx_msdu_details *msdu_details;
683 	struct rx_msdu_desc_info *msdu_desc_info;
684 	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
685 	uint32_t dst_ind;
686 
687 	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);
688 
689 	/* The first msdu in the link should exist */
690 	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
691 							   hal_soc);
692 	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
693 	return dst_ind;
694 }
695 
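/**
 * hal_reo_ix_remap_value_get_be() - Build the IX0 REO remap value for the
 *				     given REO2SW ring mask
 * @hal_soc_hdl: HAL SOC handle
 * @rx_ring_mask: bitmask of enabled REO2SW rings
 *
 * The enabled rings are spread round-robin across the eight IX0 entries; an
 * empty mask points every entry at the WBM release ring. For example, a mask
 * of 0x3 alternates REO_REMAP_SW1 and REO_REMAP_SW2 across the entries.
 *
 * Return: IX0 remap register value
 */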
696 uint32_t
697 hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
698 			      uint8_t rx_ring_mask)
699 {
700 	uint32_t num_rings = 0;
701 	uint32_t i = 0;
702 	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
703 	uint32_t reo_remap_val = 0;
704 	uint32_t ring_idx = 0;
705 	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};
706 
707 	/* create reo ring remap array */
708 	while (i < HAL_MAX_REO2SW_RINGS) {
709 		if (rx_ring_mask & (1 << i)) {
710 			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
711 			num_rings++;
712 		}
713 		i++;
714 	}
715 
716 	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
717 		if (rx_ring_mask) {
718 			ix_map[i] = ring_remap_arr[ring_idx];
719 			ring_idx = ((ring_idx + 1) % num_rings);
720 		} else {
721 			/* if ring mask is zero configure to release to WBM */
722 			ix_map[i] = REO_REMAP_RELEASE;
723 		}
724 	}
725 
726 	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
727 					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
728 					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
729 					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
730 					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
731 					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
732 					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
733 					  HAL_REO_REMAP_IX0(ix_map[7], 7);
734 
735 	return reo_remap_val;
736 }
737 
738 qdf_export_symbol(hal_reo_ix_remap_value_get_be);
739 
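/**
 * hal_reo_ring_remap_value_get_be() - Get the REO remap value for a REO2SW
 *				       ring id
 * @rx_ring_id: 0-based REO2SW ring id
 *
 * Return: remap value for the ring, or REO_REMAP_RELEASE if the id is out
 *	   of range
 */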
740 uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
741 {
742 	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
743 		return REO_REMAP_RELEASE;
744 
745 	return reo_dest_ring_remap[rx_ring_id];
746 }
747 
748 qdf_export_symbol(hal_reo_ring_remap_value_get_be);
749 
750 uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
751 {
752 	return (WBM_IDLE_DESC_LIST + chip_id);
753 }
754 
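/**
 * hal_rx_wbm_rel_buf_paddr_get_be() - Get the buffer physical address from a
 *				       WBM release ring descriptor
 * @rx_desc: WBM release ring descriptor
 * @buf_info: structure to return the buffer information
 *
 * The address is read from the cookie-converted completion fields or the raw
 * buffer address fields, depending on the cookie conversion configuration.
 */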
755 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
756 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
757 static inline void
758 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
759 				struct hal_buf_info *buf_info)
760 {
761 	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
762 		buf_info->paddr =
763 			(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
764 			 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
765 	else
766 		buf_info->paddr =
767 			(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
768 			 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
769 }
770 #else
771 static inline void
772 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
773 				struct hal_buf_info *buf_info)
774 {
775 	buf_info->paddr =
776 		(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
777 		 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
778 }
779 #endif
780 #else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
781 static inline void
782 hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
783 				struct hal_buf_info *buf_info)
784 {
785 	buf_info->paddr =
786 		(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
787 		 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
788 }
789 #endif
790 
791 #ifdef DP_UMAC_HW_RESET_SUPPORT
792 /**
793  * hal_unregister_reo_send_cmd_be() - Unregister Reo send command callback.
794  * @hal_soc: HAL soc handle
795  *
796  * Return: None
797  */
798 static
799 void hal_unregister_reo_send_cmd_be(struct hal_soc *hal_soc)
800 {
801 	hal_soc->ops->hal_reo_send_cmd = NULL;
802 }
803 
804 /**
805  * hal_register_reo_send_cmd_be() - Register Reo send command callback.
806  * @hal_soc: HAL soc handle
807  *
808  * Return: None
809  */
810 static
811 void hal_register_reo_send_cmd_be(struct hal_soc *hal_soc)
812 {
813 	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
814 }
815 
816 /**
817  * hal_reset_rx_reo_tid_q_be() - reset the reo tid queue.
818  * @hal_soc: HAL soc handle
819  * @hw_qdesc_vaddr: start address of the tid queue
820  * @size: size of the memory pointed to by hw_qdesc_vaddr
821  *
822  * Return: None
823  */
824 static void
825 hal_reset_rx_reo_tid_q_be(struct hal_soc *hal_soc, void *hw_qdesc_vaddr,
826 			  uint32_t size)
827 {
828 	struct rx_reo_queue *hw_qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr;
829 	int i;
830 
831 	if (!hw_qdesc)
832 		return;
833 
834 	hw_qdesc->svld = 0;
835 	hw_qdesc->ssn = 0;
836 	hw_qdesc->current_index = 0;
837 	hw_qdesc->pn_valid = 0;
838 	hw_qdesc->pn_31_0 = 0;
839 	hw_qdesc->pn_63_32 = 0;
840 	hw_qdesc->pn_95_64 = 0;
841 	hw_qdesc->pn_127_96 = 0;
842 	hw_qdesc->last_rx_enqueue_timestamp = 0;
843 	hw_qdesc->last_rx_dequeue_timestamp = 0;
844 	hw_qdesc->ptr_to_next_aging_queue_39_32 = 0;
845 	hw_qdesc->ptr_to_next_aging_queue_31_0 = 0;
846 	hw_qdesc->ptr_to_previous_aging_queue_31_0 = 0;
847 	hw_qdesc->ptr_to_previous_aging_queue_39_32 = 0;
848 	hw_qdesc->rx_bitmap_31_0 = 0;
849 	hw_qdesc->rx_bitmap_63_32 = 0;
850 	hw_qdesc->rx_bitmap_95_64 = 0;
851 	hw_qdesc->rx_bitmap_127_96 = 0;
852 	hw_qdesc->rx_bitmap_159_128 = 0;
853 	hw_qdesc->rx_bitmap_191_160 = 0;
854 	hw_qdesc->rx_bitmap_223_192 = 0;
855 	hw_qdesc->rx_bitmap_255_224 = 0;
856 	hw_qdesc->rx_bitmap_287_256 = 0;
857 	hw_qdesc->current_msdu_count = 0;
858 	hw_qdesc->current_mpdu_count = 0;
859 	hw_qdesc->last_sn_reg_index = 0;
860 
861 	if (size > sizeof(struct rx_reo_queue)) {
862 		struct rx_reo_queue_ext *ext_desc;
863 		struct rx_reo_queue_1k *kdesc;
864 
865 		i = ((size - sizeof(struct rx_reo_queue)) /
866 				sizeof(struct rx_reo_queue_ext));
867 
868 		if (i > 10) {
869 			i = 10;
870 			kdesc = (struct rx_reo_queue_1k *)
871 				(hw_qdesc_vaddr + sizeof(struct rx_reo_queue) +
872 				 (10 * sizeof(struct rx_reo_queue_ext)));
873 
874 			kdesc->rx_bitmap_319_288 = 0;
875 			kdesc->rx_bitmap_351_320 = 0;
876 			kdesc->rx_bitmap_383_352 = 0;
877 			kdesc->rx_bitmap_415_384 = 0;
878 			kdesc->rx_bitmap_447_416 = 0;
879 			kdesc->rx_bitmap_479_448 = 0;
880 			kdesc->rx_bitmap_511_480 = 0;
881 			kdesc->rx_bitmap_543_512 = 0;
882 			kdesc->rx_bitmap_575_544 = 0;
883 			kdesc->rx_bitmap_607_576 = 0;
884 			kdesc->rx_bitmap_639_608 = 0;
885 			kdesc->rx_bitmap_671_640 = 0;
886 			kdesc->rx_bitmap_703_672 = 0;
887 			kdesc->rx_bitmap_735_704 = 0;
888 			kdesc->rx_bitmap_767_736 = 0;
889 			kdesc->rx_bitmap_799_768 = 0;
890 			kdesc->rx_bitmap_831_800 = 0;
891 			kdesc->rx_bitmap_863_832 = 0;
892 			kdesc->rx_bitmap_895_864 = 0;
893 			kdesc->rx_bitmap_927_896 = 0;
894 			kdesc->rx_bitmap_959_928 = 0;
895 			kdesc->rx_bitmap_991_960 = 0;
896 			kdesc->rx_bitmap_1023_992 = 0;
897 		}
898 
899 		ext_desc = (struct rx_reo_queue_ext *)
900 			(hw_qdesc_vaddr + (sizeof(struct rx_reo_queue)));
901 
902 		while (i > 0) {
903 			qdf_mem_zero(&ext_desc->mpdu_link_pointer_0,
904 				     (15 * sizeof(struct rx_mpdu_link_ptr)));
905 
906 			ext_desc++;
907 			i--;
908 		}
909 	}
910 }
911 #endif
912 
913 /**
914  * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
915  *		beryllium chipsets.
916  * @hal_soc: HAL soc handle
917  *
918  * Return: None
919  */
920 void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
921 {
922 	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
923 	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
924 	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
925 	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
926 	hal_soc->ops->hal_get_reo_reg_base_offset =
927 					hal_get_reo_reg_base_offset_be;
928 	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
929 	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
930 	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
931 	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;
932 
933 	hal_soc->ops->hal_rx_ret_buf_manager_get =
934 						hal_rx_ret_buf_manager_get_be;
935 	hal_soc->ops->hal_rxdma_buff_addr_info_set =
936 					hal_rxdma_buff_addr_info_set_be;
937 	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
938 	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
939 	hal_soc->ops->hal_gen_reo_remap_val =
940 				hal_gen_reo_remap_val_generic_be;
941 	hal_soc->ops->hal_tx_comp_get_buffer_source =
942 				hal_tx_comp_get_buffer_source_generic_be;
943 	hal_soc->ops->hal_tx_comp_get_release_reason =
944 				hal_tx_comp_get_release_reason_generic_be;
945 	hal_soc->ops->hal_get_wbm_internal_error =
946 					hal_get_wbm_internal_error_generic_be;
947 	hal_soc->ops->hal_rx_mpdu_desc_info_get =
948 				hal_rx_mpdu_desc_info_get_be;
949 	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
950 	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
951 	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
952 	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
953 					hal_rx_wbm_rel_buf_paddr_get_be;
954 
955 	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
956 	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
957 	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
958 	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
959 	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
960 						hal_rx_msdu_reo_dst_ind_get_be;
961 	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
962 	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
963 					hal_rx_msdu_ext_desc_info_get_ptr_be;
964 	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
965 	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
966 #ifdef DP_UMAC_HW_RESET_SUPPORT
967 	hal_soc->ops->hal_unregister_reo_send_cmd =
968 					hal_unregister_reo_send_cmd_be;
969 	hal_soc->ops->hal_register_reo_send_cmd = hal_register_reo_send_cmd_be;
970 	hal_soc->ops->hal_reset_rx_reo_tid_q = hal_reset_rx_reo_tid_q_be;
971 #endif
972 	hal_soc->ops->hal_rx_tlv_get_pn_num = hal_rx_tlv_get_pn_num_be;
973 #ifndef CONFIG_WORD_BASED_TLV
974 	hal_soc->ops->hal_rx_get_qdesc_addr = hal_rx_get_qdesc_addr_be;
975 #endif
976 	hal_soc->ops->hal_set_reo_ent_desc_reo_dest_ind =
977 					hal_set_reo_ent_desc_reo_dest_ind_be;
978 	hal_soc->ops->hal_get_reo_ent_desc_qdesc_addr =
979 					hal_get_reo_ent_desc_qdesc_addr_be;
980 }
981