/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_module.h>
#include "hal_be_api.h"
#include "hal_be_hw_headers.h"
#include "hal_be_reo.h"
#include "hal_tx.h" //HAL_SET_FLD
#include "hal_be_rx.h" //HAL_RX_BUF_RBM_GET
#include "rx_reo_queue_1k.h"
#include "hal_be_rx_tlv.h"

/*
 * Table mapping a 0-based REO2SW ring index to the 4-bit REO
 * destination ring value programmed into hardware.
 *
 * The 4 bits REO destination ring value is defined as: 0: TCL
 * 1:SW1 2:SW2 3:SW3 4:SW4 5:Release 6:FW(WIFI) 7:SW5
 * 8:SW6 9:SW7 10:SW8 11: NOT_USED.
 */
uint32_t reo_dest_ring_remap[] = {REO_REMAP_SW1, REO_REMAP_SW2,
				  REO_REMAP_SW3, REO_REMAP_SW4,
				  REO_REMAP_SW5, REO_REMAP_SW6,
				  REO_REMAP_SW7, REO_REMAP_SW8};

/*
 * WBM idle link descriptor for Return Buffer Manager in case of
 * multi-chip configuration.
 */
#define HAL_NUM_CHIPS 4
#define HAL_WBM_CHIP_INVALID 0
#define HAL_WBM_CHIP0_IDLE_DESC_MAP 1
#define HAL_WBM_CHIP1_IDLE_DESC_MAP 2
#define HAL_WBM_CHIP2_IDLE_DESC_MAP 3
/* NOTE(review): chip3 maps to 12, not 4 — presumably a reserved BM id
 * range in HW; confirm against the WBM register spec before changing.
 */
#define HAL_WBM_CHIP3_IDLE_DESC_MAP 12

/* Per-chip WBM idle link descriptor Return Buffer Manager ids,
 * indexed by chip_id (see hal_get_idle_link_bm_id_be()).
 */
uint8_t wbm_idle_link_bm_map[] = {HAL_WBM_CHIP0_IDLE_DESC_MAP,
				  HAL_WBM_CHIP1_IDLE_DESC_MAP,
				  HAL_WBM_CHIP2_IDLE_DESC_MAP,
				  HAL_WBM_CHIP3_IDLE_DESC_MAP};

#if defined(QDF_BIG_ENDIAN_MACHINE)
/**
 * hal_setup_reo_swap() - Set REO descriptor read/write struct-swap bits
 * @soc: HAL SoC context
 *
 * On big-endian hosts, enables WRITE_STRUCT_SWAP and READ_STRUCT_SWAP
 * in the REO cache control register so the HW byte-swaps REO structures
 * on access. Read-modify-write; other bits in the register are preserved.
 *
 * Return: none
 */
void hal_setup_reo_swap(struct hal_soc *soc)
{
	uint32_t reg_val;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
			       REO_REG_REG_BASE));

	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);

	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		      REO_REG_REG_BASE), reg_val);
}
#else
/* Little-endian hosts: no swap needed, intentionally a no-op */
void hal_setup_reo_swap(struct hal_soc *soc)
{
}
#endif

/**
 * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
 * @hal_soc_hdl: Handle to HAL SoC structure
 * @hal_ring_hdl: Handle to HAL SRNG structure
 *
 * No per-descriptor init is required on Beryllium; kept as a no-op so the
 * generic ops table always has a valid callback.
 *
 * Return: none
 */
static void
hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
}

/**
 * hal_reo_setup_generic_be() - Program generic REO configuration
 * @soc: HAL SoC context
 * @reoparams: opaque pointer to struct hal_reo_params
 * @qref_reset: queue-reference reset flag (unused in this generic variant)
 *
 * Configures general enable bits, descriptor endianness swap, per-AC
 * aging thresholds, and (when rx hash routing is enabled) the
 * destination ring control IX_2/IX_3 hash remap registers.
 *
 * Return: none
 */
void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams,
			      int qref_reset)
{
	uint32_t reg_val;
	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
		REO_REG_REG_BASE));

	hal_reo_config(soc, reg_val, reo_params);
	/* Other ring enable bits and REO_ENABLE will be set by FW */

	/* TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	/* Set the reo descriptor swap bits in case of BIG endian platform */
	hal_setup_reo_swap(soc);

	/* Aging thresholds are programmed in microseconds (ms * 1000):
	 * IX_0..IX_2 (BE/BK/VI) share one timeout, IX_3 (VO) has its own.
	 */
	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look-up the
	 * ring ID configured in Destination_Ring_Ctrl_IX_* register.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set-up the 16 entries to
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry which are mapped as follows:
	 * 0: TCL, 1:SW1, 2:SW2, * 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7: NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			      REO_REG_REG_BASE),
			      reo_params->remap1);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
				       REO_REG_REG_BASE)));

		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			      REO_REG_REG_BASE),
			      reo_params->remap2);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
				       REO_REG_REG_BASE)));
	}

	/* TODO: Check if the following registers should be setup by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}

/**
 * hal_set_link_desc_addr_be() - Populate a buffer_addr_info for a link desc
 * @desc: descriptor memory to write (treated as a buffer_addr_info)
 * @cookie: SW cookie to associate with the descriptor
 * @link_desc_paddr: 40-bit physical address of the link descriptor
 * @bm_id: return buffer manager id owning this descriptor
 *
 * Return: none
 */
void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
			       qdf_dma_addr_t link_desc_paddr,
			       uint8_t bm_id)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	/* Split the 40-bit physical address across the two address fields */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
			   bm_id);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
			   cookie);
}

/**
 * hal_get_rx_max_ba_window_be() - Max rx block-ack window for a TID
 * @tid: traffic identifier (unused; all TIDs get the same window here)
 *
 * Return: HAL_RX_BA_WINDOW_256 for every TID on this target
 */
static uint16_t hal_get_rx_max_ba_window_be(int tid)
{
	return HAL_RX_BA_WINDOW_256;
}

/**
 * hal_get_reo_qdesc_size_be() - Size of REO queue descriptor to allocate
 * @ba_window_size: requested block-ack window size
 * @tid: traffic identifier
 *
 * Return: byte size of rx_reo_queue plus however many rx_reo_queue_ext
 * blocks the (possibly overridden) window size requires.
 */
static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
{
	/* Hardcode the ba_window_size to HAL_RX_MAX_BA_WINDOW for
	 * NON_QOS_TID until HW issues are resolved.
	 *
	 * NOTE(review): the condition below overrides the window for every
	 * TID EXCEPT HAL_NON_QOS_TID, which is the opposite of what this
	 * comment says — confirm which is intended before touching it.
	 */
	if (tid != HAL_NON_QOS_TID)
		ba_window_size = hal_get_rx_max_ba_window_be(tid);

	/* Return descriptor size corresponding to window size of 2 since
	 * we set ba_window_size to 2 while setting up REO descriptors as
	 * a WAR to get 2k jump exception aggregates are received without
	 * a BA session.
	 */
	if (ba_window_size <= 1) {
		if (tid != HAL_NON_QOS_TID)
			return sizeof(struct rx_reo_queue) +
				sizeof(struct rx_reo_queue_ext);
		else
			return sizeof(struct rx_reo_queue);
	}

	/* Each rx_reo_queue_ext extends the window by ~105 MPDUs */
	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}

/**
 * hal_rx_msdu_ext_desc_info_get_ptr_be() - Get MSDU extension desc info ptr
 * @msdu_details_ptr: pointer to rx msdu details
 *
 * Return: pointer to the MSDU extension descriptor info within the details
 */
void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
{
	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
}

#if defined(QCA_WIFI_KIWI) && !defined(QCA_WIFI_KIWI_V2)
/**
 * hal_wbm2sw_release_source_get() - Translate HW WBM release source to
 * the generic HAL_RX_WBM_ERR_SRC_* value
 * @hal_desc: WBM2SW release ring descriptor
 * @dir: expected direction (rx or tx) of this release ring
 *
 * KIWI v1 encodes FW/SW sources separately per direction; asserts if the
 * descriptor's direction does not match @dir.
 *
 * Return: one of HAL_RX_WBM_ERR_SRC_*
 */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	uint32_t buf_src;

	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
	switch (buf_src) {
	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
		return HAL_RX_WBM_ERR_SRC_RXDMA;
	case HAL_BE_RX_WBM_ERR_SRC_REO:
		return HAL_RX_WBM_ERR_SRC_REO;
	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	case HAL_BE_RX_WBM_ERR_SRC_TQM:
		return HAL_RX_WBM_ERR_SRC_TQM;
	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	default:
		qdf_assert_always(0);
	}

	/* not reached in practice; keeps the compiler satisfied */
	return buf_src;
}
#else
/* Other targets: HW value already matches the generic encoding */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
}
#endif

/**
 * hal_tx_comp_get_buffer_source_generic_be() - Get buffer release source
 * @hal_desc: tx completion (WBM2SW) ring descriptor
 *
 * Return: HAL_RX_WBM_ERR_SRC_* release source of the buffer
 */
uint32_t hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
{
	return hal_wbm2sw_release_source_get(hal_desc,
					     HAL_BE_WBM_RELEASE_DIR_TX);
}

/**
 * hal_tx_comp_get_release_reason_generic_be() - TQM Release reason
 * @hal_desc: completion ring descriptor pointer
 *
 * Extracts the TQM release-reason field from the WBM2SW completion
 * ring descriptor.
 *
 * Return: TQM release reason value
 */
static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
{
	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);

	return (comp_desc &
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
}

/**
 * hal_get_wbm_internal_error_generic_be() - is WBM internal error
 * @hal_desc: completion ring descriptor pointer
 *
 * This function will return 0 or 1 - is it WBM internal error or not
 *
 * Return: uint8_t
 */
static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
{
	/*
	 * TODO - This func is called by tx comp and wbm error handler
	 * Check if one needs to use WBM2SW-TX and other WBM2SW-RX
	 */
	uint32_t comp_desc =
		*(uint32_t *)(((uint8_t *)hal_desc) +
			      HAL_WBM_INTERNAL_ERROR_OFFSET);

	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
		HAL_WBM_INTERNAL_ERROR_LSB;
}

/**
 * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
 * @ring_desc: ring descriptor
 *
 * Return: wbm error source (HAL_RX_WBM_ERR_SRC_*)
 */
static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
{
	return hal_wbm2sw_release_source_get(ring_desc,
					     HAL_BE_WBM_RELEASE_DIR_RX);
}

/**
 * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
 * @ring_desc: ring descriptor
 *
 * Return: rbm
 */
uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as argument,
	 * but since buf_addr_info is the first field in ring_desc
	 * Hence the following call is OK
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}

/* Field extractors for the WBM2SW rx completion descriptor: each macro
 * indexes the dword containing the field (byte offset >> 2), masks it
 * and shifts it down to bit 0.
 */
#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc) \
	(((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc) \
	(((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)

/**
 * hal_rx_wbm_err_info_get_generic_be(): Retrieves WBM error code and reason and
 *	save it to hal_wbm_err_desc_info structure passed by caller
 * @wbm_desc: wbm ring descriptor
 * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter.
 * Return: void
 */
void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
{
	struct hal_wbm_err_desc_info *wbm_er_info =
		(struct hal_wbm_err_desc_info *)wbm_er_info1;

	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
}

/**
 * hal_rx_reo_buf_paddr_get_be() - Get 40-bit buffer physical address and
 * SW cookie from a REO destination ring descriptor
 * @rx_desc: REO destination ring descriptor
 * @buf_info: output; paddr and sw_cookie are filled in
 */
static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
					struct hal_buf_info *buf_info)
{
	struct reo_destination_ring *reo_ring =
		(struct reo_destination_ring *)rx_desc;

	buf_info->paddr =
	 (HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
	  ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
}

/**
 * hal_rx_msdu_link_desc_set_be() - Release an MSDU link descriptor to WBM
 * @hal_soc_hdl: HAL SoC handle
 * @src_srng_desc: WBM release ring descriptor to fill
 * @buf_addr_info: buffer address info of the link descriptor being released
 * @bm_action: buffer manager action (e.g. put in idle list)
 */
static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
					 void *src_srng_desc,
					 hal_buff_addrinfo_t buf_addr_info,
					 uint8_t bm_action)
{
	/*
	 * The offsets for fields used in this function are same in
	 * wbm_release_ring for Lithium and wbm_release_ring_tx
	 * for Beryllium. hence we can use wbm_release_ring directly.
	 */
	struct wbm_release_ring *wbm_rel_srng =
			(struct wbm_release_ring *)src_srng_desc;
	uint32_t addr_31_0;
	uint8_t addr_39_32;

	/* Structure copy !!! */
	wbm_rel_srng->released_buff_or_desc_addr_info =
				*((struct buffer_addr_info *)buf_addr_info);

	addr_31_0 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
	addr_39_32 =
	wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;

	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
			   bm_action);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   BUFFER_OR_DESC_TYPE,
			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);

	/* WBM error is indicated when any of the link descriptors given to
	 * WBM has a NULL address, and one those paths is the link descriptors
	 * released from host after processing RXDMA errors,
	 * or from Rx defrag path, and we want to add an assert here to ensure
	 * host is not releasing descriptors with NULL address.
	 */

	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
		hal_dump_wbm_rel_desc(src_srng_desc);
		qdf_assert_always(0);
	}
}

/**
 * hal_rx_buf_cookie_rbm_get_be() - Get SW cookie and return buffer
 * manager from a buffer_addr_info
 * @buf_addr_info_hdl: pointer to the buffer_addr_info words
 * @buf_info_hdl: output hal_buf_info; sw_cookie and rbm are filled in
 *
 * Return: void
 */
static
void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
				  hal_buf_info_t buf_info_hdl)
{
	struct hal_buf_info *buf_info =
		(struct hal_buf_info *)buf_info_hdl;
	struct buffer_addr_info *buf_addr_info =
		(struct buffer_addr_info *)buf_addr_info_hdl;

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
	/*
	 * buffer addr info is the first member of ring desc, so the typecast
	 * can be done.
	 */
	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
						(hal_ring_desc_t)buf_addr_info);
}

/*
 * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
 *				    rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as a index to SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 */
static inline void
hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
				qdf_dma_addr_t paddr, uint32_t cookie,
				uint8_t manager)
{
	/* Split the 40-bit DMA address into the low/high descriptor fields */
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}

/**
 * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
 * @rx_desc: rx descriptor
 *
 * Return: REO error code
 */
static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
{
	struct reo_destination_ring *reo_desc =
			(struct reo_destination_ring *)rx_desc;

	return HAL_RX_REO_ERROR_GET(reo_desc);
}

/**
 * hal_gen_reo_remap_val_generic_be() - Generate the reo map value
 * @remap_reg: which destination ring control IX register (IX0 or IX2)
 * @ix0_map: mapping values for reo (8 entries)
 *
 * Return: IX0/IX2 reo remap register value to be written; 0 for
 * unsupported registers.
 */
static uint32_t
hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
				 uint8_t *ix0_map)
{
	uint32_t ix_val = 0;

	switch (remap_reg) {
	case HAL_REO_REMAP_REG_IX0:
		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
			HAL_REO_REMAP_IX0(ix0_map[7], 7);
		break;
	case HAL_REO_REMAP_REG_IX2:
		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
			HAL_REO_REMAP_IX2(ix0_map[7], 23);
		break;
	default:
		break;
	}

	return ix_val;
}

/**
 * hal_rx_err_status_get_be() - Get error status field from ring descriptor
 * @rx_desc: rx ring descriptor
 *
 * Return: error status value
 */
static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_ERROR_STATUS_GET(rx_desc);
}

/**
 * hal_reo_status_update_be() - Dispatch a REO status TLV to its parser
 * @hal_soc_hdl: HAL SoC handle
 * @reo_desc: REO status ring descriptor
 * @st_handle: union hal_reo_status to fill, chosen by @tlv
 * @tlv: REO status TLV type
 * @num_ref: output; command number from the parsed status header
 *
 * Return: QDF_STATUS_SUCCESS on a recognized TLV, QDF_STATUS_E_FAILURE
 * otherwise
 */
static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_desc_t reo_desc,
					   void *st_handle,
					   uint32_t tlv, int *num_ref)
{
	union hal_reo_status *reo_status_ref;

	reo_status_ref = (union hal_reo_status *)st_handle;

	switch (tlv) {
	case HAL_REO_QUEUE_STATS_STATUS_TLV:
		hal_reo_queue_stats_status_be(reo_desc,
					      &reo_status_ref->queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
		hal_reo_flush_queue_status_be(reo_desc,
					      &reo_status_ref->fl_queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
		hal_reo_flush_cache_status_be(reo_desc,
					      &reo_status_ref->fl_cache_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
		break;
	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
		hal_reo_unblock_cache_status_be
			(reo_desc, hal_soc_hdl,
			 &reo_status_ref->unblk_cache_status);
		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
		break;
	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
		hal_reo_flush_timeout_list_status_be(
					reo_desc,
					&reo_status_ref->fl_timeout_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
		break;
	case HAL_REO_DESC_THRES_STATUS_TLV:
		hal_reo_desc_thres_reached_status_be(
						reo_desc,
						&reo_status_ref->thres_status,
						hal_soc_hdl);
		*num_ref = reo_status_ref->thres_status.header.cmd_num;
		break;
	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
		hal_reo_rx_update_queue_status_be(
					reo_desc,
					&reo_status_ref->rx_queue_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
			  "hal_soc %pK: no handler for TLV:%d",
			  hal_soc_hdl, tlv);
		return QDF_STATUS_E_FAILURE;
	} /* switch */

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_rx_reo_buf_type_get_be() - Get buffer-or-link-desc type from ring desc
 * @rx_desc: REO destination ring descriptor
 *
 * Return: REO buffer type field
 */
static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}

#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
/**
 * hal_cookie_conversion_reg_cfg_be() - Program HW cookie conversion
 * registers via the target-specific op
 * @hal_soc_hdl: HAL SoC handle
 * @cc_cfg: cookie conversion configuration to apply
 */
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
				      struct hal_hw_cc_config *cc_cfg)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_soc->ops->hal_cookie_conversion_reg_cfg_be(hal_soc_hdl, cc_cfg);
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);

/**
 * hal_msdu_desc_info_set_be() - Populate an rx MSDU descriptor info for a
 * single complete MSDU (first == last, no continuation)
 * @hal_soc_hdl: HAL SoC handle
 * @msdu_desc: rx_msdu_desc_info to fill; the rx_msdu_ext_desc_info is
 *	       assumed to immediately follow it in memory
 * @dst_ind: REO destination indication to program in the ext desc info
 * @nbuf_len: MSDU length in bytes
 */
static inline void
hal_msdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *msdu_desc, uint32_t dst_ind,
			  uint32_t nbuf_len)
{
	struct rx_msdu_desc_info *msdu_desc_info =
		(struct rx_msdu_desc_info *)msdu_desc;
	struct rx_msdu_ext_desc_info *msdu_ext_desc_info =
		(struct rx_msdu_ext_desc_info *)(msdu_desc_info + 1);

	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  FIRST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  LAST_MSDU_IN_MPDU_FLAG, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_CONTINUATION, 0x0);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  MSDU_LENGTH, nbuf_len);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  SA_IS_VALID, 1);
	HAL_RX_MSDU_DESC_INFO_SET(msdu_desc_info,
				  DA_IS_VALID, 1);
	HAL_RX_MSDU_REO_DST_IND_SET(msdu_ext_desc_info,
				    REO_DESTINATION_INDICATION, dst_ind);
}

/**
 * hal_mpdu_desc_info_set_be() - Populate an rx MPDU descriptor info for a
 * single-MSDU, non-fragmented, non-raw MPDU
 * @hal_soc_hdl: HAL SoC handle
 * @ent_desc: REO entrance ring descriptor (sequence number is set here)
 * @mpdu_desc: rx_mpdu_desc_info to fill
 * @seq_no: MPDU sequence number
 */
static inline void
hal_mpdu_desc_info_set_be(hal_soc_handle_t hal_soc_hdl,
			  void *ent_desc,
			  void *mpdu_desc,
			  uint32_t seq_no)
{
	struct rx_mpdu_desc_info *mpdu_desc_info =
			(struct rx_mpdu_desc_info *)mpdu_desc;
	uint8_t *desc = (uint8_t *)ent_desc;

	HAL_RX_FLD_SET(desc, REO_ENTRANCE_RING,
		       MPDU_SEQUENCE_NUMBER, seq_no);

	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  MSDU_COUNT, 0x1);
	/* unset frag bit */
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  FRAGMENT_FLAG, 0x0);
	HAL_RX_MPDU_DESC_INFO_SET(mpdu_desc_info,
				  RAW_MPDU, 0x0);
}

/**
 * hal_rx_msdu_reo_dst_ind_get_be() - Gets the REO
 * destination ring ID from the msdu desc info
 * @hal_soc_hdl: HAL SoC handle
 * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
 *		    the current descriptor
 *
 * Return: dst_ind (REO destination ring ID)
 */
static inline
uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
					void *msdu_link_desc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct rx_msdu_details *msdu_details;
	struct rx_msdu_desc_info *msdu_desc_info;
	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
	uint32_t dst_ind;

	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);

	/* The first msdu in the link should exist */
	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
							   hal_soc);
	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
	return dst_ind;
}

/**
 * hal_reo_ix_remap_value_get_be() - Build an IX0-style remap register
 * value that round-robins the 8 hash slots over the rings enabled in
 * @rx_ring_mask; if the mask is empty every slot routes to WBM release
 * @hal_soc_hdl: HAL SoC handle
 * @rx_ring_mask: bitmask of enabled REO2SW rings
 *
 * Return: remap register value
 */
uint32_t
hal_reo_ix_remap_value_get_be(hal_soc_handle_t hal_soc_hdl,
			      uint8_t rx_ring_mask)
{
	uint32_t num_rings = 0;
	uint32_t i = 0;
	uint32_t ring_remap_arr[HAL_MAX_REO2SW_RINGS] = {0};
	uint32_t reo_remap_val = 0;
	uint32_t ring_idx = 0;
	uint8_t ix_map[HAL_NUM_RX_RING_PER_IX_MAP] = {0};

	/* create reo ring remap array */
	while (i < HAL_MAX_REO2SW_RINGS) {
		if (rx_ring_mask & (1 << i)) {
			ring_remap_arr[num_rings] = reo_dest_ring_remap[i];
			num_rings++;
		}
		i++;
	}

	for (i = 0; i < HAL_NUM_RX_RING_PER_IX_MAP; i++) {
		if (rx_ring_mask) {
			ix_map[i] = ring_remap_arr[ring_idx];
			ring_idx = ((ring_idx + 1) % num_rings);
		} else {
			/* if ring mask is zero configure to release to WBM */
			ix_map[i] = REO_REMAP_RELEASE;
		}
	}

	reo_remap_val = HAL_REO_REMAP_IX0(ix_map[0], 0) |
					  HAL_REO_REMAP_IX0(ix_map[1], 1) |
					  HAL_REO_REMAP_IX0(ix_map[2], 2) |
					  HAL_REO_REMAP_IX0(ix_map[3], 3) |
					  HAL_REO_REMAP_IX0(ix_map[4], 4) |
					  HAL_REO_REMAP_IX0(ix_map[5], 5) |
					  HAL_REO_REMAP_IX0(ix_map[6], 6) |
					  HAL_REO_REMAP_IX0(ix_map[7], 7);

	return reo_remap_val;
}

qdf_export_symbol(hal_reo_ix_remap_value_get_be);

/**
 * hal_reo_ring_remap_value_get_be() - Destination ring value for one ring id
 * @rx_ring_id: 0-based REO2SW ring index
 *
 * Return: REO_REMAP_* value; REO_REMAP_RELEASE for out-of-range ids
 */
uint8_t hal_reo_ring_remap_value_get_be(uint8_t rx_ring_id)
{
	if (rx_ring_id >= HAL_MAX_REO2SW_RINGS)
		return REO_REMAP_RELEASE;

	return reo_dest_ring_remap[rx_ring_id];
}

qdf_export_symbol(hal_reo_ring_remap_value_get_be);

/**
 * hal_get_idle_link_bm_id_be() - WBM idle link desc BM id for a chip
 * @chip_id: chip index in a multi-chip configuration
 *
 * Return: per-chip BM id; HAL_WBM_CHIP_INVALID for out-of-range chip ids
 */
uint8_t hal_get_idle_link_bm_id_be(uint8_t chip_id)
{
	if (chip_id >= HAL_NUM_CHIPS)
		return HAL_WBM_CHIP_INVALID;

	return wbm_idle_link_bm_map[chip_id];
}

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/*
 * With cookie-convert exceptions enabled, the address layout of the WBM
 * release descriptor depends on whether HW already converted the cookie;
 * pick the matching pair of address fields at runtime.
 */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	if (hal_rx_wbm_get_cookie_convert_done(rx_desc))
		buf_info->paddr =
		 (HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
		  ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
	else
		buf_info->paddr =
		 (HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
		  ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#else
/* Cookie conversion always done by HW: use the COMP address fields */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
	 (HAL_RX_WBM_COMP_BUF_ADDR_31_0_GET(rx_desc) |
	  ((uint64_t)(HAL_RX_WBM_COMP_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif
#else /* !DP_FEATURE_HW_COOKIE_CONVERSION */
/* No HW cookie conversion: use the raw buffer address fields */
static inline void
hal_rx_wbm_rel_buf_paddr_get_be(hal_ring_desc_t rx_desc,
				struct hal_buf_info *buf_info)
{
	buf_info->paddr =
	 (HAL_RX_WBM_BUF_ADDR_31_0_GET(rx_desc) |
	  ((uint64_t)(HAL_RX_WBM_BUF_ADDR_39_32_GET(rx_desc)) << 32));
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * hal_unregister_reo_send_cmd_be() - Unregister Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_unregister_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = NULL;
}

/**
 * hal_register_reo_send_cmd_be() - Register Reo send command callback.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
static
void hal_register_reo_send_cmd_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
}

/**
 * hal_reset_rx_reo_tid_q_be() - reset the reo tid queue.
 * @hal_soc: HAL soc handle
 * @hw_qdesc_vaddr: start address of the tid queue
 * @size: size of address pointed by hw_qdesc_vaddr
 *
 * Clears all sequencing/PN/aging/bitmap state in the base rx_reo_queue,
 * plus whatever extension (and 1k-window) descriptors fit inside @size,
 * so the queue can be reused after a UMAC HW reset.
 *
 * Return: None
 */
static void
hal_reset_rx_reo_tid_q_be(struct hal_soc *hal_soc, void *hw_qdesc_vaddr,
			  uint32_t size)
{
	struct rx_reo_queue *hw_qdesc = (struct rx_reo_queue *)hw_qdesc_vaddr;
	int i;

	if (!hw_qdesc)
		return;

	/* Reset sequencing state */
	hw_qdesc->svld = 0;
	hw_qdesc->ssn = 0;
	hw_qdesc->current_index = 0;
	/* Reset PN (packet number) replay-check state */
	hw_qdesc->pn_valid = 0;
	hw_qdesc->pn_31_0 = 0;
	hw_qdesc->pn_63_32 = 0;
	hw_qdesc->pn_95_64 = 0;
	hw_qdesc->pn_127_96 = 0;
	/* Reset aging/timestamp linkage */
	hw_qdesc->last_rx_enqueue_timestamp = 0;
	hw_qdesc->last_rx_dequeue_timestamp = 0;
	hw_qdesc->ptr_to_next_aging_queue_39_32 = 0;
	hw_qdesc->ptr_to_next_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_31_0 = 0;
	hw_qdesc->ptr_to_previous_aging_queue_39_32 = 0;
	/* Clear the base 288-bit reorder window bitmap */
	hw_qdesc->rx_bitmap_31_0 = 0;
	hw_qdesc->rx_bitmap_63_32 = 0;
	hw_qdesc->rx_bitmap_95_64 = 0;
	hw_qdesc->rx_bitmap_127_96 = 0;
	hw_qdesc->rx_bitmap_159_128 = 0;
	hw_qdesc->rx_bitmap_191_160 = 0;
	hw_qdesc->rx_bitmap_223_192 = 0;
	hw_qdesc->rx_bitmap_255_224 = 0;
	hw_qdesc->rx_bitmap_287_256 = 0;
	hw_qdesc->current_msdu_count = 0;
	hw_qdesc->current_mpdu_count = 0;
	hw_qdesc->last_sn_reg_index = 0;

	if (size > sizeof(struct rx_reo_queue)) {
		struct rx_reo_queue_ext *ext_desc;
		struct rx_reo_queue_1k *kdesc;

		/* Number of extension descriptors present after the base */
		i = ((size - sizeof(struct rx_reo_queue)) /
				sizeof(struct rx_reo_queue_ext));

		/* More than 10 ext descs implies a 1k-window queue: the
		 * rx_reo_queue_1k block sits after the 10th ext desc and
		 * carries the bitmap for sequence numbers 288..1023.
		 *
		 * NOTE(review): arithmetic on hw_qdesc_vaddr (void *) relies
		 * on the GCC byte-granularity extension.
		 */
		if (i > 10) {
			i = 10;
			kdesc = (struct rx_reo_queue_1k *)
				(hw_qdesc_vaddr + sizeof(struct rx_reo_queue) +
				 (10 * sizeof(struct rx_reo_queue_ext)));

			kdesc->rx_bitmap_319_288 = 0;
			kdesc->rx_bitmap_351_320 = 0;
			kdesc->rx_bitmap_383_352 = 0;
			kdesc->rx_bitmap_415_384 = 0;
			kdesc->rx_bitmap_447_416 = 0;
			kdesc->rx_bitmap_479_448 = 0;
			kdesc->rx_bitmap_511_480 = 0;
			kdesc->rx_bitmap_543_512 = 0;
			kdesc->rx_bitmap_575_544 = 0;
			kdesc->rx_bitmap_607_576 = 0;
			kdesc->rx_bitmap_639_608 = 0;
			kdesc->rx_bitmap_671_640 = 0;
			kdesc->rx_bitmap_703_672 = 0;
			kdesc->rx_bitmap_735_704 = 0;
			kdesc->rx_bitmap_767_736 = 0;
			kdesc->rx_bitmap_799_768 = 0;
			kdesc->rx_bitmap_831_800 = 0;
			kdesc->rx_bitmap_863_832 = 0;
			kdesc->rx_bitmap_895_864 = 0;
			kdesc->rx_bitmap_927_896 = 0;
			kdesc->rx_bitmap_959_928 = 0;
			kdesc->rx_bitmap_991_960 = 0;
			kdesc->rx_bitmap_1023_992 = 0;
		}

		ext_desc = (struct rx_reo_queue_ext *)
			(hw_qdesc_vaddr + (sizeof(struct rx_reo_queue)));

		/* Zero the 15 MPDU link pointers in each extension desc */
		while (i > 0) {
			qdf_mem_zero(&ext_desc->mpdu_link_pointer_0,
				     (15 * sizeof(struct rx_mpdu_link_ptr)));

			ext_desc++;
			i--;
		}
	}
}
#endif

/**
 * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
 *			 beryllium chipsets.
 * @hal_soc: HAL soc handle
 *
 * Populates the generic Beryllium callbacks in hal_soc->ops; target-
 * specific attach functions may override individual entries afterwards.
 *
 * Return: None
 */
void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
	hal_soc->ops->hal_get_rx_max_ba_window = hal_get_rx_max_ba_window_be;
	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
	hal_soc->ops->hal_get_reo_reg_base_offset =
					hal_get_reo_reg_base_offset_be;
	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;
	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;

	hal_soc->ops->hal_rx_ret_buf_manager_get =
					hal_rx_ret_buf_manager_get_be;
	hal_soc->ops->hal_rxdma_buff_addr_info_set =
					hal_rxdma_buff_addr_info_set_be;
	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
	hal_soc->ops->hal_gen_reo_remap_val =
				hal_gen_reo_remap_val_generic_be;
	hal_soc->ops->hal_tx_comp_get_buffer_source =
				hal_tx_comp_get_buffer_source_generic_be;
	hal_soc->ops->hal_tx_comp_get_release_reason =
				hal_tx_comp_get_release_reason_generic_be;
	hal_soc->ops->hal_get_wbm_internal_error =
					hal_get_wbm_internal_error_generic_be;
	hal_soc->ops->hal_rx_mpdu_desc_info_get =
					hal_rx_mpdu_desc_info_get_be;
	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;
	hal_soc->ops->hal_rx_wbm_rel_buf_paddr_get =
					hal_rx_wbm_rel_buf_paddr_get_be;

	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
						hal_rx_msdu_reo_dst_ind_get_be;
	hal_soc->ops->hal_get_idle_link_bm_id = hal_get_idle_link_bm_id_be;
	hal_soc->ops->hal_rx_msdu_ext_desc_info_get_ptr =
					hal_rx_msdu_ext_desc_info_get_ptr_be;
	hal_soc->ops->hal_msdu_desc_info_set = hal_msdu_desc_info_set_be;
	hal_soc->ops->hal_mpdu_desc_info_set = hal_mpdu_desc_info_set_be;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	hal_soc->ops->hal_unregister_reo_send_cmd =
					hal_unregister_reo_send_cmd_be;
	hal_soc->ops->hal_register_reo_send_cmd = hal_register_reo_send_cmd_be;
	hal_soc->ops->hal_reset_rx_reo_tid_q = hal_reset_rx_reo_tid_q_be;
#endif
	hal_soc->ops->hal_rx_tlv_get_pn_num = hal_rx_tlv_get_pn_num_be;
#ifndef CONFIG_WORD_BASED_TLV
	hal_soc->ops->hal_rx_get_qdesc_addr = hal_rx_get_qdesc_addr_be;
#endif
	hal_soc->ops->hal_set_reo_ent_desc_reo_dest_ind =
					hal_set_reo_ent_desc_reo_dest_ind_be;
	hal_soc->ops->hal_get_reo_ent_desc_qdesc_addr =
					hal_get_reo_ent_desc_qdesc_addr_be;
}