xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/be/hal_be_generic_api.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_module.h>
21 #include "hal_be_api.h"
22 #include "hal_be_hw_headers.h"
23 #include "hal_be_reo.h"
24 #include "hal_tx.h"	//HAL_SET_FLD
25 #include "hal_be_rx.h"	//HAL_RX_BUF_RBM_GET
26 #include "rx_reo_queue_1k.h"
27 #include "hal_be_rx_tlv.h"
28 
/*
 * reo_dest_ring_remap[] - REO destination ring remap codes, indexed by
 * SW ring number (index 0 -> SW1 ... index 7 -> SW8).
 *
 * The 4 bits REO destination ring value is defined as: 0: TCL
 * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
 * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
 */
uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
				  REO_REMAP_SW3, REO_REMAP_SW4,
				  REO_REMAP_SW5, REO_REMAP_SW6,
				  REO_REMAP_SW7, REO_REMAP_SW8};
39 
#if defined(QDF_BIG_ENDIAN_MACHINE)
/**
 * hal_setup_reo_swap() - Enable REO descriptor structure byte swap
 * @soc: HAL SoC handle
 *
 * Read-modify-write of REO_R0_CACHE_CTL_CONFIG: sets both the write and
 * read struct-swap bits so the REO cache swaps descriptor structures on
 * big endian hosts.
 *
 * Return: none
 */
void hal_setup_reo_swap(struct hal_soc *soc)
{
	uint32_t reg_val;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE));

	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);

	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE), reg_val);
}
#else
/* Little endian host: no struct swap configuration needed */
void hal_setup_reo_swap(struct hal_soc *soc)
{
}
#endif
59 
/**
 * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
 * @hal_soc_hdl: Handle to HAL SoC structure
 * @hal_ring_hdl: Handle to HAL SRNG structure
 *
 * Intentionally empty for Beryllium targets: TCL data ring descriptors
 * need no pre-initialization here.
 *
 * Return: none
 */
static void
hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
}
72 
/**
 * hal_reo_setup_generic_be() - One-time REO block setup for BE targets
 * @soc: HAL SoC handle
 * @reoparams: opaque pointer to struct hal_reo_params (remap values,
 *	       rx hash enable flag)
 * @qref_reset: queue reference reset flag (unused in this generic path)
 *
 * Programs REO general enable (via hal_reo_config), descriptor endianness
 * swap, per-index aging thresholds, and - when rx hash based routing is
 * enabled - the hash destination remap registers IX_2/IX_3.
 *
 * Return: none
 */
void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams,
			      int qref_reset)
{
	uint32_t reg_val;
	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
		REO_REG_REG_BASE));

	hal_reo_config(soc, reg_val, reo_params);
	/* Other ring enable bits and REO_ENABLE will be set by FW */

	/* TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	/* Set the reo descriptor swap bits in case of BIG endian platform */
	hal_setup_reo_swap(soc);

	/* Aging thresholds: *_TIMEOUT_MS * 1000 — presumably the register
	 * takes microseconds; confirm against the REO register spec.
	 * IX_0..IX_2 use the BE/BK/VI timeout, IX_3 the VO timeout.
	 */
	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look-up the
	 * ring ID configured in Destination_Ring_Ctrl_IX_* register.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set-up the 16 entries to
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry - which are mapped as follows:
	 * 0: TCL, 1:SW1, 2:SW2, * 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7: NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap1);

		/* read back for debugging so the log shows the HW view */
		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
				       REO_REG_REG_BASE)));

		HAL_REG_WRITE(soc,
			HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			REO_REG_REG_BASE),
			reo_params->remap2);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
				       REO_REG_REG_BASE)));
	}

	/* TODO: Check if the following registers should be setup by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}
152 
/**
 * hal_set_link_desc_addr_be() - Fill a buffer_addr_info with a link
 * descriptor address, SW cookie and return buffer manager
 * @desc: pointer to the buffer_addr_info words to program
 * @cookie: SW cookie stored in the descriptor
 * @link_desc_paddr: physical address of the link descriptor
 * @bm_id: return buffer manager id owning the descriptor
 *
 * Return: none
 */
void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
			       qdf_dma_addr_t link_desc_paddr,
			       uint8_t bm_id)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	/* split the physical address into low 32 and high 39..32 bits */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
			   bm_id);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
			   cookie);
}
168 
/* Max rx BA window supported on BE targets; the cap is the same for
 * every TID (the @tid argument is currently unused).
 */
static uint16_t hal_get_rx_max_ba_window_be(int tid)
{
	return  HAL_RX_BA_WINDOW_256;
}
173 
174 static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
175 {
176 	/* Hardcode the ba_window_size to HAL_RX_MAX_BA_WINDOW for
177 	 * NON_QOS_TID until HW issues are resolved.
178 	 */
179 	if (tid != HAL_NON_QOS_TID)
180 		ba_window_size = hal_get_rx_max_ba_window_be(tid);
181 
182 	/* Return descriptor size corresponding to window size of 2 since
183 	 * we set ba_window_size to 2 while setting up REO descriptors as
184 	 * a WAR to get 2k jump exception aggregates are received without
185 	 * a BA session.
186 	 */
187 	if (ba_window_size <= 1) {
188 		if (tid != HAL_NON_QOS_TID)
189 			return sizeof(struct rx_reo_queue) +
190 				sizeof(struct rx_reo_queue_ext);
191 		else
192 			return sizeof(struct rx_reo_queue);
193 	}
194 
195 	if (ba_window_size <= 105)
196 		return sizeof(struct rx_reo_queue) +
197 			sizeof(struct rx_reo_queue_ext);
198 
199 	if (ba_window_size <= 210)
200 		return sizeof(struct rx_reo_queue) +
201 			(2 * sizeof(struct rx_reo_queue_ext));
202 
203 	return sizeof(struct rx_reo_queue) +
204 		(3 * sizeof(struct rx_reo_queue_ext));
205 }
206 
/**
 * hal_rx_msdu_ext_desc_info_get_ptr_be() - Get pointer to the extended
 * MSDU desc info inside an rx_msdu_details entry
 * @msdu_details_ptr: pointer to the rx_msdu_details entry
 *
 * Return: pointer to the extended MSDU descriptor info
 */
void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
{
	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
}
211 
#if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
/**
 * hal_wbm2sw_release_source_get() - Translate HW release source encoding
 * to the common HAL_RX_WBM_ERR_SRC_* values
 * @hal_desc: WBM2SW release ring descriptor
 * @dir: direction (rx/tx) of the ring this descriptor came from
 *
 * KIWI v1 encodes FW and SW sources separately for rx and tx; assert if
 * the encoding contradicts the direction of the ring being processed.
 *
 * Return: HAL_RX_WBM_ERR_SRC_* source module
 */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	uint32_t buf_src;

	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
	switch (buf_src) {
	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
		return HAL_RX_WBM_ERR_SRC_RXDMA;
	case HAL_BE_RX_WBM_ERR_SRC_REO:
		return HAL_RX_WBM_ERR_SRC_REO;
	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	case HAL_BE_RX_WBM_ERR_SRC_TQM:
		return HAL_RX_WBM_ERR_SRC_TQM;
	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	default:
		qdf_assert_always(0);
	}

	/* unreachable when asserts are fatal; keeps the compiler happy */
	return buf_src;
}
#else
/* Other targets: HW encoding matches the common enum, pass it through */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
}
#endif
255 
/**
 * hal_tx_comp_get_buffer_source_generic_be() - Get the release source
 * module from a tx completion (WBM2SW) descriptor
 * @hal_desc: completion ring descriptor pointer
 *
 * Return: HAL_RX_WBM_ERR_SRC_* release source
 */
uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
{
	return hal_wbm2sw_release_source_get(hal_desc,
					     HAL_BE_WBM_RELEASE_DIR_TX);
}
261 
/**
 * hal_tx_comp_get_release_reason_generic_be() - TQM Release reason
 * @hal_desc: completion ring descriptor pointer
 *
 * Extracts the TX_TQM_RELEASE_REASON field from a WBM2SW completion
 * ring descriptor.
 *
 * Return: TQM release reason code
 */
static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
{
	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);

	return (comp_desc &
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
}
279 
/**
 * hal_get_wbm_internal_error_generic_be() - is WBM internal error
 * @hal_desc: completion ring descriptor pointer
 *
 * This function will return 0 or 1 - is it WBM internal error or not
 *
 * Return: uint8_t
 */
static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
{
	/*
	 * TODO -  This func is called by tx comp and wbm error handler
	 * Check if one needs to use WBM2SW-TX and other WBM2SW-RX
	 */
	uint32_t comp_desc =
		*(uint32_t *)(((uint8_t *)hal_desc) +
			      HAL_WBM_INTERNAL_ERROR_OFFSET);

	/* mask out the field and shift it down to bit 0 */
	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
		HAL_WBM_INTERNAL_ERROR_LSB;
}
301 
/**
 * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
 * @ring_desc: WBM2SW rx release ring descriptor
 *
 * Return: HAL_RX_WBM_ERR_SRC_* wbm error source
 */
static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
{
	return hal_wbm2sw_release_source_get(ring_desc,
					     HAL_BE_WBM_RELEASE_DIR_RX);
}
313 
/**
 * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
 * @ring_desc: ring descriptor
 *
 * Return: rbm (return buffer manager id)
 */
uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as argument,
	 * but since buf_addr_info is the first field in ring_desc
	 * Hence the following call is OK
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}
329 
/*
 * Field extractors for a WBM2SW rx completion descriptor: each converts
 * the byte offset of the field to a dword index (>> 2), masks the field
 * and shifts it down to bit 0.
 */
#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc)	\
	(((*(((uint32_t *)wbm_desc) +			\
	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >>	\
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)
351 
/**
 * hal_rx_wbm_err_info_get_generic_be() - Retrieves WBM error code and reason
 *	and saves them to the hal_wbm_err_desc_info structure passed by caller
 * @wbm_desc: wbm ring descriptor
 * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter.
 *
 * Return: void
 */
void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
{
	struct hal_wbm_err_desc_info *wbm_er_info =
		(struct hal_wbm_err_desc_info *)wbm_er_info1;

	/* source module first, then REO and RXDMA push reason/error code */
	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
}
370 
/**
 * hal_rx_reo_buf_paddr_get_be() - Get buffer physical address and SW
 * cookie from a REO destination ring descriptor
 * @rx_desc: REO destination ring descriptor
 * @buf_info: output hal_buf_info; paddr and sw_cookie are filled
 */
static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
					struct hal_buf_info *buf_info)
{
	struct reo_destination_ring *reo_ring =
		 (struct reo_destination_ring *)rx_desc;

	/* combine the low 32 bits and bits 39..32 into one address */
	buf_info->paddr =
	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
}
382 
/**
 * hal_rx_msdu_link_desc_set_be() - Fill a SW2WBM release ring entry to
 * return an MSDU link descriptor to WBM
 * @hal_soc_hdl: HAL SoC handle (unused here)
 * @src_srng_desc: SW2WBM release ring entry to fill
 * @buf_addr_info: buffer address info of the link descriptor being freed
 * @bm_action: buffer manager action (e.g. put in idle list)
 *
 * Return: none
 */
static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
					 void *src_srng_desc,
					 hal_buff_addrinfo_t buf_addr_info,
					 uint8_t bm_action)
{
	/*
	 * The offsets for fields used in this function are same in
	 * wbm_release_ring for Lithium and wbm_release_ring_tx
	 * for Beryllium. hence we can use wbm_release_ring directly.
	 */
	struct wbm_release_ring *wbm_rel_srng =
			(struct wbm_release_ring *)src_srng_desc;
	uint32_t addr_31_0;
	uint8_t addr_39_32;

	/* Structure copy !!! */
	wbm_rel_srng->released_buff_or_desc_addr_info =
				*((struct buffer_addr_info *)buf_addr_info);

	addr_31_0 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
	addr_39_32 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;

	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
			   bm_action);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   BUFFER_OR_DESC_TYPE,
			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);

	/* WBM error is indicated when any of the link descriptors given to
	 * WBM has a NULL address, and one those paths is the link descriptors
	 * released from host after processing RXDMA errors,
	 * or from Rx defrag path, and we want to add an assert here to ensure
	 * host is not releasing descriptors with NULL address.
	 */

	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
		hal_dump_wbm_rel_desc(src_srng_desc);
		qdf_assert_always(0);
	}
}
427 
/**
 * hal_rx_buf_cookie_rbm_get_be() - Get the SW cookie and return buffer
 * manager from a buffer address info
 *
 * @buf_addr_info_hdl: pointer to the buffer_addr_info words (also the
 * first words of a ring descriptor)
 * @buf_info_hdl: hal_buf_info structure to fill, output parameter
 * Return: void
 */
static
void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
				  hal_buf_info_t buf_info_hdl)
{
	struct hal_buf_info *buf_info =
		(struct hal_buf_info *)buf_info_hdl;
	struct buffer_addr_info *buf_addr_info =
		(struct buffer_addr_info *)buf_addr_info_hdl;

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
	/*
	 * buffer addr info is the first member of ring desc, so the typecast
	 * can be done.
	 */
	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
						(hal_ring_desc_t)buf_addr_info);
}
455 
/**
 * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
 *				    rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as a index to SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 * Return: none
 */
static inline void
hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
				qdf_dma_addr_t paddr, uint32_t cookie,
				uint8_t manager)
{
	/* split the dma address into low/high 32-bit halves */
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}
478 
/**
 * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
 * @rx_desc: rx descriptor (REO destination ring entry)
 *
 * Return: REO error code extracted from the REO_ERROR field
 */
static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
{
	struct reo_destination_ring *reo_desc =
			(struct reo_destination_ring *)rx_desc;

	return HAL_RX_REO_ERROR_GET(reo_desc);
}
492 
/**
 * hal_gen_reo_remap_val_generic_be() - Generate the reo remap register value
 * @remap_reg: which remap register to generate the value for (IX0 or IX2)
 * @ix0_map: array of 8 destination ring values to pack
 *
 * Return: IX0/IX2 reo remap register value to be written; 0 for an
 * unsupported @remap_reg
 */
static uint32_t
hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
				 uint8_t *ix0_map)
{
	uint32_t ix_val = 0;

	switch (remap_reg) {
	case HAL_REO_REMAP_REG_IX0:
		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
			HAL_REO_REMAP_IX0(ix0_map[7], 7);
		break;
	case HAL_REO_REMAP_REG_IX2:
		/* IX2 entries start at hash slot 16 */
		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
			HAL_REO_REMAP_IX2(ix0_map[7], 23);
		break;
	default:
		break;
	}

	return ix_val;
}
532 
/* Extract the error status field from an rx ring descriptor via
 * HAL_RX_ERROR_STATUS_GET.
 */
static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_ERROR_STATUS_GET(rx_desc);
}
537 
/**
 * hal_reo_status_update_be() - Parse a REO status ring TLV into the
 * caller's hal_reo_status union
 * @hal_soc_hdl: HAL SoC handle
 * @reo_desc: REO status ring descriptor
 * @st_handle: union hal_reo_status to fill, output parameter
 * @tlv: TLV tag identifying the status type
 * @num_ref: output, the command number from the parsed status header
 *
 * Return: QDF_STATUS_SUCCESS on a known TLV, QDF_STATUS_E_FAILURE if
 * no handler exists for @tlv
 */
static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_desc_t reo_desc,
					   void *st_handle,
					   uint32_t tlv, int *num_ref)
{
	union hal_reo_status *reo_status_ref;

	reo_status_ref = (union hal_reo_status *)st_handle;

	switch (tlv) {
	case HAL_REO_QUEUE_STATS_STATUS_TLV:
		hal_reo_queue_stats_status_be(reo_desc,
					      &reo_status_ref->queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
		hal_reo_flush_queue_status_be(reo_desc,
					      &reo_status_ref->fl_queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
		hal_reo_flush_cache_status_be(reo_desc,
					      &reo_status_ref->fl_cache_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
		break;
	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
		hal_reo_unblock_cache_status_be
			(reo_desc, hal_soc_hdl,
			 &reo_status_ref->unblk_cache_status);
		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
		break;
	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
		hal_reo_flush_timeout_list_status_be(
					reo_desc,
					&reo_status_ref->fl_timeout_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
		break;
	case HAL_REO_DESC_THRES_STATUS_TLV:
		hal_reo_desc_thres_reached_status_be(
						reo_desc,
						&reo_status_ref->thres_status,
						hal_soc_hdl);
		*num_ref = reo_status_ref->thres_status.header.cmd_num;
		break;
	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
		hal_reo_rx_update_queue_status_be(
					reo_desc,
					&reo_status_ref->rx_queue_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
			  "hal_soc %pK: no handler for TLV:%d",
			   hal_soc_hdl, tlv);
		return QDF_STATUS_E_FAILURE;
	} /* switch */

	return QDF_STATUS_SUCCESS;
}
602 
/* Extract the REO buffer-or-link-desc type field from a REO destination
 * ring descriptor.
 */
static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}
607 
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/* bit15 of the WBM misc-control spare field, per the macro name —
 * NOTE(review): confirm usage against the WBM register spec
 */
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
/**
 * hal_cookie_conversion_reg_cfg_be() - Configure HW cookie conversion
 * registers
 * @hal_soc_hdl: HAL SoC handle
 * @cc_cfg: cookie conversion configuration
 *
 * Thin dispatcher to the chip-specific implementation installed in
 * hal_soc->ops.
 *
 * Return: none
 */
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
				      struct hal_hw_cc_config *cc_cfg)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);
619 
/**
 * hal_msdu_desc_info_set_be() - Populate an rx_msdu_desc_info for a
 * single, complete MSDU
 * @hal_soc_hdl: HAL SoC handle (unused here)
 * @msdu_desc: rx_msdu_desc_info to fill
 * @dst_ind: REO destination ring indication (unused here)
 * @nbuf_len: MSDU length in bytes
 *
 * Marks the MSDU as both first and last in its MPDU, not continued,
 * with valid SA and DA.
 */
static inline void
hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *msdu_desc, uint32_t dst_ind,
			  uint32_t nbuf_len)
{
	struct rx_msdu_desc_info *msdu_desc_info =
		(struct rx_msdu_desc_info *)msdu_desc;

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_LENGTH, nbuf_len);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);
}
641 
/**
 * hal_mpdu_desc_info_set_be() - Populate an rx_mpdu_desc_info and the
 * sequence number in the REO entrance ring entry
 * @hal_soc_hdl: HAL SoC handle (unused here)
 * @ent_desc: REO entrance ring entry to stamp with the sequence number
 * @mpdu_desc: rx_mpdu_desc_info to fill
 * @seq_no: MPDU sequence number
 *
 * Configures a single-MSDU, non-fragment, non-raw MPDU.
 */
static inline void
hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *ent_desc,
			  void *mpdu_desc,
			  uint32_t seq_no)
{
	struct rx_mpdu_desc_info *mpdu_desc_info =
			(struct rx_mpdu_desc_info *)mpdu_desc;
	uint8_t *desc = (uint8_t *)ent_desc;

	HAL_RX_FLD_SET(desc, REO_ENTRANCE_RING,
		       MPDU_SEQUENCE_NUMBER, seq_no);

	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	/* unset frag bit */
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  FRAGMENT_FLAG, 0x0);
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  RAW_MPDU, 0x0);
}
663 
/**
 * hal_rx_msdu_reo_dst_ind_get_be() - Gets the REO
 * destination ring ID from the msdu desc info
 *
 * @hal_soc_hdl: HAL SoC handle
 * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
 * the current descriptor
 *
 * Return: dst_ind (REO destination ring ID)
 */
static inline
uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
					void *msdu_link_desc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct rx_msdu_details *msdu_details;
	struct rx_msdu_desc_info *msdu_desc_info;
	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
	uint32_t dst_ind;

	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);

	/* The first msdu in the link should exist */
	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
							   hal_soc);
	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
	return dst_ind;
}
691 
692 uint32_t
693 hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
694 			      uint8_t rx_ring_mask)
695 {
696 	uint32_t num_rings = 0;
697 	uint32_t i = 0;
698 	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
699 	uint32_t reo_remap_val = 0;
700 	uint32_t ring_idx = 0;
701 	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};
702 
703 	/* create reo ring remap array */
704 	while (i < HAL_MAX_REO2SW_RINGS) {
705 		if (rx_ring_mask & (1 << i)) {
706 			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
707 			num_rings++;
708 		}
709 		i++;
710 	}
711 
712 	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
713 		if (rx_ring_mask) {
714 			ix_map[i] = ring_remap_arr[ring_idx];
715 			ring_idx = ((ring_idx + 1) % num_rings);
716 		} else {
717 			/* if ring mask is zero configure to release to WBM */
718 			ix_map[i] = REO_REMAP_RELEASE;
719 		}
720 	}
721 
722 	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
723 					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
724 					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
725 					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
726 					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
727 					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
728 					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
729 					  HAL_REO_REMAP_IX0(ix_map[7], 7);
730 
731 	return reo_remap_val;
732 }
733 
734 qdf_export_symbol(hal_reo_ix_remap_value_get_be);
735 
736 uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
737 {
738 	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
739 		return REO_REMAP_RELEASE;
740 
741 	return reo_dest_ring_remap[rx_ring_id];
742 }
743 
744 qdf_export_symbol(hal_reo_ring_remap_value_get_be);
745 
/* Idle link descriptor buffer-manager id for a chip: the base
 * WBM_IDLE_DESC_LIST offset by the chip index.
 */
uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
{
	return (WBM_IDLE_DESC_LIST + chip_id);
}
750 
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/**
 * hal_rx_wbm_rel_buf_paddr_get_be() - Get buffer physical address from a
 * WBM release ring descriptor
 * @rx_desc: WBM release ring descriptor
 * @buf_info: output hal_buf_info; paddr is filled
 *
 * With cookie-conversion exception enabled, the address layout depends on
 * whether HW already performed the conversion for this descriptor.
 */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
		buf_info->paddr =
			(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
	else
		buf_info->paddr =
			(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
			 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#else
/* Cookie conversion always done by HW: use the completion-ring layout */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif
#else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
/* No HW cookie conversion: use the raw WBM buffer address layout */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
		(HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
		 ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif
786 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hal_unregister_reo_send_cmd_be() - Unregister Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_unregister_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = NULL;
}
799 
/**
 * hal_register_reo_send_cmd_be() - Register Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_register_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
}
811 
/**
 * hal_reset_rx_reo_tid_q_be() - reset the reo tid queue.
 * @hal_soc: HAL soc handle
 * @hw_qdesc_vaddr: start address of the tid queue
 * @size: size of address pointed by hw_qdesc_vaddr
 *
 * Clears state, PN, timestamps, aging list links, rx bitmaps and counters
 * in the base rx_reo_queue, then zeroes the mpdu link pointers of every
 * rx_reo_queue_ext that fits in @size (plus the 1k-window bitmap block
 * when more than 10 ext descriptors are present).
 *
 * Return: None
 */
static void
hal_reset_rx_reo_tid_q_be(struct hal_soc *hal_soc, void *hw_qdesc_vaddr,
			  uint32_t size)
{
	struct rx_reo_queue *hw_qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr;
	int i;

	if (!hw_qdesc)
		return;

	hw_qdesc->svld = 0;
	hw_qdesc->ssn = 0;
	hw_qdesc->current_index = 0;
	hw_qdesc->pn_valid = 0;
	hw_qdesc->pn_31_0 = 0;
	hw_qdesc->pn_63_32 = 0;
	hw_qdesc->pn_95_64 = 0;
	hw_qdesc->pn_127_96 = 0;
	hw_qdesc->last_rx_enqueue_timestamp = 0;
	hw_qdesc->last_rx_dequeue_timestamp = 0;
	hw_qdesc->ptr_to_next_aging_queue_39_32 = 0;
	hw_qdesc->ptr_to_next_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_39_32 = 0;
	hw_qdesc->rx_bitmap_31_0 = 0;
	hw_qdesc->rx_bitmap_63_32 = 0;
	hw_qdesc->rx_bitmap_95_64 = 0;
	hw_qdesc->rx_bitmap_127_96 = 0;
	hw_qdesc->rx_bitmap_159_128 = 0;
	hw_qdesc->rx_bitmap_191_160 = 0;
	hw_qdesc->rx_bitmap_223_192 = 0;
	hw_qdesc->rx_bitmap_255_224 = 0;
	hw_qdesc->rx_bitmap_287_256 = 0;
	hw_qdesc->current_msdu_count = 0;
	hw_qdesc->current_mpdu_count = 0;
	hw_qdesc->last_sn_reg_index = 0;

	if (size > sizeof(struct rx_reo_queue)) {
		struct rx_reo_queue_ext *ext_desc;
		struct rx_reo_queue_1k *kdesc;

		/* number of ext descriptors that fit after the base queue */
		i = ((size - sizeof(struct rx_reo_queue)) /
				sizeof(struct rx_reo_queue_ext));

		/* more than 10 ext descs implies a 1k BA window: the 1k
		 * bitmap block sits after the 10th ext desc — NOTE(review):
		 * the void pointer arithmetic below relies on the GNU C
		 * extension (void* treated as byte-sized)
		 */
		if (i > 10) {
			i = 10;
			kdesc = (struct rx_reo_queue_1k *)
				(hw_qdesc_vaddr + sizeof(struct rx_reo_queue) +
				 (10 * sizeof(struct rx_reo_queue_ext)));

			kdesc->rx_bitmap_319_288 = 0;
			kdesc->rx_bitmap_351_320 = 0;
			kdesc->rx_bitmap_383_352 = 0;
			kdesc->rx_bitmap_415_384 = 0;
			kdesc->rx_bitmap_447_416 = 0;
			kdesc->rx_bitmap_479_448 = 0;
			kdesc->rx_bitmap_511_480 = 0;
			kdesc->rx_bitmap_543_512 = 0;
			kdesc->rx_bitmap_575_544 = 0;
			kdesc->rx_bitmap_607_576 = 0;
			kdesc->rx_bitmap_639_608 = 0;
			kdesc->rx_bitmap_671_640 = 0;
			kdesc->rx_bitmap_703_672 = 0;
			kdesc->rx_bitmap_735_704 = 0;
			kdesc->rx_bitmap_767_736 = 0;
			kdesc->rx_bitmap_799_768 = 0;
			kdesc->rx_bitmap_831_800 = 0;
			kdesc->rx_bitmap_863_832 = 0;
			kdesc->rx_bitmap_895_864 = 0;
			kdesc->rx_bitmap_927_896 = 0;
			kdesc->rx_bitmap_959_928 = 0;
			kdesc->rx_bitmap_991_960 = 0;
			kdesc->rx_bitmap_1023_992 = 0;
		}

		ext_desc = (struct rx_reo_queue_ext *)
			(hw_qdesc_vaddr + (sizeof(struct rx_reo_queue)));

		/* zero the 15 mpdu link pointers of each ext descriptor */
		while (i > 0) {
			qdf_mem_zero(&ext_desc->mpdu_link_pointer_0,
				     (15 * sizeof(struct rx_mpdu_link_ptr)));

			ext_desc++;
			i--;
		}
	}
}
#endif
908 
/**
 * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
 *		beryllium chipsets.
 * @hal_soc: HAL soc handle
 *
 * Populates hal_soc->ops with the generic BE implementations defined in
 * this file and in the BE rx/reo headers; chip-specific attach functions
 * may override individual entries afterwards.
 *
 * Return: None
 */
void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
	hal_soc->ops->hal_get_reo_reg_base_offset =
					hal_get_reo_reg_base_offset_be;
	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;

	hal_soc->ops->hal_rx_ret_buf_manager_get =
						hal_rx_ret_buf_manager_get_be;
	hal_soc->ops->hal_rxdma_buff_addr_info_set =
					hal_rxdma_buff_addr_info_set_be;
	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
	hal_soc->ops->hal_gen_reo_remap_val =
				hal_gen_reo_remap_val_generic_be;
	hal_soc->ops->hal_tx_comp_get_buffer_source =
				hal_tx_comp_get_buffer_source_generic_be;
	hal_soc->ops->hal_tx_comp_get_release_reason =
				hal_tx_comp_get_release_reason_generic_be;
	hal_soc->ops->hal_get_wbm_internal_error =
					hal_get_wbm_internal_error_generic_be;
	hal_soc->ops->hal_rx_mpdu_desc_info_get =
				hal_rx_mpdu_desc_info_get_be;
	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
					hal_rx_wbm_rel_buf_paddr_get_be;

	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
						hal_rx_msdu_reo_dst_ind_get_be;
	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
					hal_rx_msdu_ext_desc_info_get_ptr_be;
	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	hal_soc->ops->hal_unregister_reo_send_cmd =
					hal_unregister_reo_send_cmd_be;
	hal_soc->ops->hal_register_reo_send_cmd = hal_register_reo_send_cmd_be;
	hal_soc->ops->hal_reset_rx_reo_tid_q = hal_reset_rx_reo_tid_q_be;
#endif
	hal_soc->ops->hal_rx_tlv_get_pn_num = hal_rx_tlv_get_pn_num_be;
	hal_soc->ops->hal_rx_get_qdesc_addr = hal_rx_get_qdesc_addr_be;
	hal_soc->ops->hal_set_reo_ent_desc_reo_dest_ind =
					hal_set_reo_ent_desc_reo_dest_ind_be;
	hal_soc->ops->hal_get_reo_ent_desc_qdesc_addr =
					hal_get_reo_ent_desc_qdesc_addr_be;
}
975