/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_module.h>
#include "hal_be_api.h"
#include "hal_be_hw_headers.h"
#include "hal_be_reo.h"
#include "hal_tx.h" //HAL_SET_FLD
#include "hal_be_rx.h" //HAL_RX_BUF_RBM_GET

#if defined(QDF_BIG_ENDIAN_MACHINE)
/**
 * hal_setup_reo_swap() - Set the swap flag for big endian machines
 * @soc: HAL soc handle
 *
 * Sets the WRITE_STRUCT_SWAP and READ_STRUCT_SWAP bits in the REO cache
 * control register so that REO descriptors are byte-swapped by HW on
 * big-endian hosts.
 *
 * Return: None
 */
static void hal_setup_reo_swap(struct hal_soc *soc)
{
	uint32_t reg_val;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE));

	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, WRITE_STRUCT_SWAP, 1);
	reg_val |= HAL_SM(HWIO_REO_R0_CACHE_CTL_CONFIG, READ_STRUCT_SWAP, 1);

	HAL_REG_WRITE(soc, HWIO_REO_R0_CACHE_CTL_CONFIG_ADDR(
		REO_REG_REG_BASE), reg_val);
}
#else
/* Little-endian hosts need no descriptor swap configuration */
static inline void hal_setup_reo_swap(struct hal_soc *soc)
{
}
#endif

/**
 * hal_tx_init_data_ring_be() - Initialize all the TCL Descriptors in SRNG
 * @hal_soc_hdl: Handle to HAL SoC structure
 * @hal_ring_hdl: Handle to HAL SRNG structure
 *
 * Intentionally a no-op on Beryllium: TCL data ring descriptors need no
 * host-side pre-initialization on this target.
 *
 * Return: none
 */
static void
hal_tx_init_data_ring_be(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
}

/**
 * hal_reo_setup_generic_be() - Program the common REO registers
 * @soc: HAL SOC handle
 * @reoparams: opaque pointer to a struct hal_reo_params with the
 *             requested REO configuration (hash routing, remap values)
 *
 * Programs REO general enable, per-AC aging thresholds and, when rx hash
 * based routing is enabled, the destination ring control remap registers.
 *
 * Return: none
 */
void hal_reo_setup_generic_be(struct hal_soc *soc, void *reoparams)
{
	uint32_t reg_val;
	struct hal_reo_params *reo_params = (struct hal_reo_params *)reoparams;

	reg_val = HAL_REG_READ(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
		REO_REG_REG_BASE));

	hal_reo_config(soc, reg_val, reo_params);
	/* Other ring enable bits and REO_ENABLE will be set by FW */

	/* TODO: Setup destination ring mapping if enabled */

	/* TODO: Error destination ring setting is left to default.
	 * Default setting is to send all errors to release ring.
	 */

	/* Set the reo descriptor swap bits in case of BIG endian platform */
	hal_setup_reo_swap(soc);

	/* Aging thresholds are programmed in microseconds (ms * 1000):
	 * IX_0..IX_2 (BE/BK/VI) share one timeout, IX_3 (VO) has its own.
	 */
	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(REO_REG_REG_BASE),
		      HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000);

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_MS * 1000));

	HAL_REG_WRITE(soc,
		      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(REO_REG_REG_BASE),
		      (HAL_DEFAULT_VO_REO_TIMEOUT_MS * 1000));

	/*
	 * When hash based routing is enabled, routing of the rx packet
	 * is done based on the following value: 1 _ _ _ _ The last 4
	 * bits are based on hash[3:0]. This means the possible values
	 * are 0x10 to 0x1f. This value is used to look-up the
	 * ring ID configured in Destination_Ring_Ctrl_IX_* register.
	 * The Destination_Ring_Ctrl_IX_2 and Destination_Ring_Ctrl_IX_3
	 * registers need to be configured to set-up the 16 entries to
	 * map the hash values to a ring number. There are 3 bits per
	 * hash entry which are mapped as follows:
	 * 0: TCL, 1:SW1, 2:SW2, * 3:SW3, 4:SW4, 5:Release, 6:FW(WIFI),
	 * 7: NOT_USED.
	 */
	if (reo_params->rx_hash_enabled) {
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
			      REO_REG_REG_BASE),
			      reo_params->remap1);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
				       REO_REG_REG_BASE)));

		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
			      REO_REG_REG_BASE),
			      reo_params->remap2);

		hal_debug("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x",
			  HAL_REG_READ(soc,
				       HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
				       REO_REG_REG_BASE)));
	}

	/* TODO: Check if the following registers should be setup by host:
	 * AGING_CONTROL
	 * HIGH_MEMORY_THRESHOLD
	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
	 * GLOBAL_LINK_DESC_COUNT_CTRL
	 */
}

/**
 * hal_set_link_desc_addr_be() - Populate a buffer_addr_info descriptor
 * @desc: pointer to the descriptor (laid out as BUFFER_ADDR_INFO)
 * @cookie: SW cookie to store in the descriptor
 * @link_desc_paddr: 40-bit physical address of the link descriptor
 *
 * Splits the physical address into the 31_0/39_32 fields, sets the
 * return buffer manager to the WBM idle descriptor list and stores the
 * SW cookie.
 *
 * Return: none
 */
void hal_set_link_desc_addr_be(void *desc, uint32_t cookie,
			       qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, RETURN_BUFFER_MANAGER,
			   WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO, SW_BUFFER_COOKIE,
			   cookie);
}

/**
 * hal_get_reo_qdesc_size_be() - Get REO queue descriptor size for a
 *				 given BA window size
 * @ba_window_size: block-ack window size
 * @tid: traffic identifier (HAL_NON_QOS_TID for non-QoS)
 *
 * Return: size in bytes of rx_reo_queue plus however many
 *	   rx_reo_queue_ext structures the window size requires
 */
static uint32_t hal_get_reo_qdesc_size_be(uint32_t ba_window_size, int tid)
{
	/* Return descriptor size corresponding to window size of 2 since
	 * we set ba_window_size to 2 while setting up REO descriptors as
	 * a WAR to get 2k jump exception aggregates are received without
	 * a BA session.
	 */
	if (ba_window_size <= 1) {
		if (tid != HAL_NON_QOS_TID)
			return sizeof(struct rx_reo_queue) +
				sizeof(struct rx_reo_queue_ext);
		else
			return sizeof(struct rx_reo_queue);
	}

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}

/**
 * hal_rx_msdu_ext_desc_info_get_ptr_be() - Get extended MSDU desc info
 * @msdu_details_ptr: pointer to the rx_msdu_details entry
 *
 * Return: pointer to the extended MSDU descriptor info within the entry
 */
void *hal_rx_msdu_ext_desc_info_get_ptr_be(void *msdu_details_ptr)
{
	return HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr);
}

#ifdef QCA_WIFI_WCN7850
/**
 * hal_wbm2sw_release_source_get() - Translate HW WBM release source to
 *				     the generic HAL_RX_WBM_ERR_SRC_* value
 * @hal_desc: WBM2SW release ring descriptor
 * @dir: expected release direction (RX or TX)
 *
 * Asserts if a FW/SW source code is seen in the wrong direction, since
 * the FW/SW encodings differ between the rx and tx release paths.
 *
 * Return: one of HAL_RX_WBM_ERR_SRC_*
 */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	uint32_t buf_src;

	buf_src = HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
	switch (buf_src) {
	case HAL_BE_RX_WBM_ERR_SRC_RXDMA:
		return HAL_RX_WBM_ERR_SRC_RXDMA;
	case HAL_BE_RX_WBM_ERR_SRC_REO:
		return HAL_RX_WBM_ERR_SRC_REO;
	case HAL_BE_RX_WBM_ERR_SRC_FW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_RX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_RX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	case HAL_BE_RX_WBM_ERR_SRC_TQM:
		return HAL_RX_WBM_ERR_SRC_TQM;
	case HAL_BE_RX_WBM_ERR_SRC_FW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_FW;
	case HAL_BE_RX_WBM_ERR_SRC_SW_TX:
		if (dir != HAL_BE_WBM_RELEASE_DIR_TX)
			qdf_assert_always(0);
		return HAL_RX_WBM_ERR_SRC_SW;
	default:
		qdf_assert_always(0);
	}

	return buf_src;
}
#else
/* On targets where the HW encoding already matches HAL_RX_WBM_ERR_SRC_*,
 * return the raw field; @dir is unused.
 */
static inline uint32_t
hal_wbm2sw_release_source_get(void *hal_desc, enum hal_be_wbm_release_dir dir)
{
	return HAL_WBM2SW_RELEASE_SRC_GET(hal_desc);
}
#endif

/**
 * hal_tx_comp_get_buffer_source_generic_be() - Get the release source of
 *						a tx completion descriptor
 * @hal_desc: tx completion (WBM2SW release) ring descriptor
 *
 * Return: release source, one of HAL_RX_WBM_ERR_SRC_*
 */
uint32_t
hal_tx_comp_get_buffer_source_generic_be(void *hal_desc)
{
	return hal_wbm2sw_release_source_get(hal_desc,
					     HAL_BE_WBM_RELEASE_DIR_TX);
}

/**
 * hal_tx_comp_get_release_reason_generic_be() - TQM Release reason
 * @hal_desc: completion ring descriptor pointer
 *
 * Extracts the TX_TQM_RELEASE_REASON field from the WBM2SW completion
 * ring descriptor.
 *
 * Return: TQM release reason code
 */
static uint8_t hal_tx_comp_get_release_reason_generic_be(void *hal_desc)
{
	uint32_t comp_desc = *(uint32_t *)(((uint8_t *)hal_desc) +
			WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_OFFSET);

	return (comp_desc &
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_MASK) >>
		WBM2SW_COMPLETION_RING_TX_TQM_RELEASE_REASON_LSB;
}

/**
 * hal_get_wbm_internal_error_generic_be() - is WBM internal error
 * @hal_desc: completion ring descriptor pointer
 *
 * This function will return 0 or 1 - is it WBM internal error or not
 *
 * Return: uint8_t
 */
static uint8_t hal_get_wbm_internal_error_generic_be(void *hal_desc)
{
	/*
	 * TODO - This func is called by tx comp and wbm error handler
	 * Check if one needs to use WBM2SW-TX and other WBM2SW-RX
	 */
	uint32_t comp_desc =
		*(uint32_t *)(((uint8_t *)hal_desc) +
			      HAL_WBM_INTERNAL_ERROR_OFFSET);

	return (comp_desc & HAL_WBM_INTERNAL_ERROR_MASK) >>
		HAL_WBM_INTERNAL_ERROR_LSB;
}

/**
 * hal_setup_link_idle_list_generic_be - Setup scattered idle list using the
 * buffer list provided
 *
 * @soc: HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 * Return: None
 */
static void
hal_setup_link_idle_list_generic_be(struct hal_soc *soc,
				    qdf_dma_addr_t scatter_bufs_base_paddr[],
				    void *scatter_bufs_base_vaddr[],
				    uint32_t num_scatter_bufs,
				    uint32_t scatter_buf_size,
				    uint32_t last_buf_end_offset,
				    uint32_t num_entries)
{
	int i;
	uint32_t *prev_buf_link_ptr = NULL;
	uint32_t reg_scatter_buf_size, reg_tot_scatter_buf_size;
	uint32_t val;

	/* Link the scatter buffers: the last words of each buffer hold the
	 * physical address of the next buffer (LSB word, then MSB word with
	 * the address-match tag).
	 */
	for (i = 0; i < num_scatter_bufs; i++) {
		if (i > 0) {
			prev_buf_link_ptr[0] =
				scatter_bufs_base_paddr[i] & 0xffffffff;
			prev_buf_link_ptr[1] = HAL_SM(
				HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
				BASE_ADDRESS_39_32,
				((uint64_t)(scatter_bufs_base_paddr[i])
				 >> 32)) | HAL_SM(
				HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
				ADDRESS_MATCH_TAG,
				ADDRESS_MATCH_TAG_VAL);
		}
		prev_buf_link_ptr = (uint32_t *)(scatter_bufs_base_vaddr[i] +
			scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE);
	}

	/* TBD: Register programming partly based on MLD & the rest based on
	 * inputs from HW team. Not complete yet.
	 */

	/* Register fields are in units of 64 bytes */
	reg_scatter_buf_size = (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) / 64;
	reg_tot_scatter_buf_size = ((scatter_buf_size -
		WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) * num_scatter_bufs) / 64;

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_IDLE_LIST_CONTROL_ADDR(
		      WBM_REG_REG_BASE),
		      HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, SCATTER_BUFFER_SIZE,
		      reg_scatter_buf_size) |
		      HAL_SM(HWIO_WBM_R0_IDLE_LIST_CONTROL, LINK_DESC_IDLE_LIST_MODE,
		      0x1));

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_IDLE_LIST_SIZE_ADDR(
		      WBM_REG_REG_BASE),
		      HAL_SM(HWIO_WBM_R0_IDLE_LIST_SIZE,
		      SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
		      reg_tot_scatter_buf_size));

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_LSB_ADDR(
		      WBM_REG_REG_BASE),
		      scatter_bufs_base_paddr[0] & 0xffffffff);

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR(
		      WBM_REG_REG_BASE),
		      ((uint64_t)(scatter_bufs_base_paddr[0]) >> 32) &
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_BASE_ADDRESS_39_32_BMSK);

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR(
		      WBM_REG_REG_BASE),
		      HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
		      BASE_ADDRESS_39_32, ((uint64_t)(scatter_bufs_base_paddr[0])
								>> 32)) |
		      HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB,
		      ADDRESS_MATCH_TAG, ADDRESS_MATCH_TAG_VAL));

	/* ADDRESS_MATCH_TAG field in the above register is expected to match
	 * with the upper bits of link pointer. The above write sets this field
	 * to zero and we are also setting the upper bits of link pointers to
	 * zero while setting up the link list of scatter buffers above
	 */

	/* Setup head and tail pointers for the idle list */
	/* NOTE(review): HEAD_INFO_IX0 is written twice - first with the last
	 * scatter buffer's address, then (after IX1) with buffer 0's address.
	 * Presumably an intentional HW programming sequence, but worth
	 * confirming with the register programming guide.
	 */
	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
		      WBM_REG_REG_BASE),
		      scatter_bufs_base_paddr[num_scatter_bufs - 1] & 0xffffffff);
	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1_ADDR(
		      WBM_REG_REG_BASE),
		      HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
		      BUFFER_ADDRESS_39_32,
		      ((uint64_t)(scatter_bufs_base_paddr[num_scatter_bufs - 1])
								>> 32)) |
		      HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
		      HEAD_POINTER_OFFSET, last_buf_end_offset >> 2));

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
		      WBM_REG_REG_BASE),
		      scatter_bufs_base_paddr[0] & 0xffffffff);

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX0_ADDR(
		      WBM_REG_REG_BASE),
		      scatter_bufs_base_paddr[0] & 0xffffffff);
	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1_ADDR(
		      WBM_REG_REG_BASE),
		      HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
		      BUFFER_ADDRESS_39_32,
		      ((uint64_t)(scatter_bufs_base_paddr[0]) >>
		      32)) | HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
		      TAIL_POINTER_OFFSET, 0));

	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HP_ADDR(
		      WBM_REG_REG_BASE),
		      2 * num_entries);

	/* Set RING_ID_DISABLE */
	val = HAL_SM(HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC, RING_ID_DISABLE, 1);

	/*
	 * SRNG_ENABLE bit is not available in HWK v1 (QCA8074v1). Hence
	 * check the presence of the bit before toggling it.
	 */
#ifdef HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE_BMSK
	val |= HAL_SM(HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC, SRNG_ENABLE, 1);
#endif
	HAL_REG_WRITE(soc,
		      HWIO_WBM_R0_WBM_IDLE_LINK_RING_MISC_ADDR(WBM_REG_REG_BASE),
		      val);
}

/**
 * hal_rx_wbm_err_src_get_be() - Get WBM error source from descriptor
 * @ring_desc: ring descriptor
 *
 * Return: wbm error source
 */
static uint32_t hal_rx_wbm_err_src_get_be(hal_ring_desc_t ring_desc)
{
	return hal_wbm2sw_release_source_get(ring_desc,
					     HAL_BE_WBM_RELEASE_DIR_RX);
}

/**
 * hal_rx_ret_buf_manager_get_be() - Get return buffer manager from ring desc
 * @ring_desc: ring descriptor
 *
 * Return: rbm
 */
uint8_t hal_rx_ret_buf_manager_get_be(hal_ring_desc_t ring_desc)
{
	/*
	 * The following macro takes buf_addr_info as argument,
	 * but since buf_addr_info is the first field in ring_desc
	 * Hence the following call is OK
	 */
	return HAL_RX_BUF_RBM_GET(ring_desc);
}

/* Field extractors for the WBM2SW rx completion descriptor: each reads one
 * 32-bit word at the field's byte offset and shifts/masks out the field.
 */
#define HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_PUSH_REASON_LSB)

#define HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc) (((*(((uint32_t *)wbm_desc) + \
		(WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_OFFSET >> 2))) & \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_MASK) >> \
		WBM2SW_COMPLETION_RING_RX_REO_ERROR_CODE_LSB)

#define HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc) \
	(((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_RXDMA_PUSH_REASON_LSB)

#define HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc) \
	(((*(((uint32_t *)wbm_desc) + \
	(WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_OFFSET >> 2))) & \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_MASK) >> \
	WBM2SW_COMPLETION_RING_RX_RXDMA_ERROR_CODE_LSB)

/**
 * hal_rx_wbm_err_info_get_generic_be(): Retrieves WBM error code and reason and
 *	save it to hal_wbm_err_desc_info structure passed by caller
 * @wbm_desc: wbm ring descriptor
 * @wbm_er_info1: hal_wbm_err_desc_info structure, output parameter.
 *
 * Return: void
 */
void hal_rx_wbm_err_info_get_generic_be(void *wbm_desc, void *wbm_er_info1)
{
	struct hal_wbm_err_desc_info *wbm_er_info =
		(struct hal_wbm_err_desc_info *)wbm_er_info1;

	wbm_er_info->wbm_err_src = hal_rx_wbm_err_src_get_be(wbm_desc);
	wbm_er_info->reo_psh_rsn = HAL_RX_WBM_REO_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->reo_err_code = HAL_RX_WBM_REO_ERROR_CODE_GET(wbm_desc);
	wbm_er_info->rxdma_psh_rsn = HAL_RX_WBM_RXDMA_PUSH_REASON_GET(wbm_desc);
	wbm_er_info->rxdma_err_code = HAL_RX_WBM_RXDMA_ERROR_CODE_GET(wbm_desc);
}

/**
 * hal_rx_reo_buf_paddr_get_be() - Get buffer physical address and SW cookie
 *				   from a REO destination ring descriptor
 * @rx_desc: REO destination ring descriptor
 * @buf_info: output structure for the 40-bit paddr and SW cookie
 *
 * Return: none
 */
static void hal_rx_reo_buf_paddr_get_be(hal_ring_desc_t rx_desc,
					struct hal_buf_info *buf_info)
{
	struct reo_destination_ring *reo_ring =
		(struct reo_destination_ring *)rx_desc;

	buf_info->paddr =
		(HAL_RX_REO_BUFFER_ADDR_31_0_GET(reo_ring) |
		 ((uint64_t)(HAL_RX_REO_BUFFER_ADDR_39_32_GET(reo_ring)) << 32));
	buf_info->sw_cookie = HAL_RX_REO_BUF_COOKIE_GET(reo_ring);
}

/**
 * hal_rx_msdu_link_desc_set_be() - Fill a WBM release ring entry to return
 *				    an MSDU link descriptor
 * @hal_soc_hdl: HAL SOC handle
 * @src_srng_desc: WBM release ring descriptor to fill
 * @buf_addr_info: buffer address info of the link descriptor being released
 * @bm_action: buffer manager action (e.g. put in idle list)
 *
 * Asserts if the descriptor address is NULL, since releasing a NULL-address
 * link descriptor to WBM causes a WBM internal error.
 *
 * Return: none
 */
static void hal_rx_msdu_link_desc_set_be(hal_soc_handle_t hal_soc_hdl,
					 void *src_srng_desc,
					 hal_buff_addrinfo_t buf_addr_info,
					 uint8_t bm_action)
{
	/*
	 * The offsets for fields used in this function are same in
	 * wbm_release_ring for Lithium and wbm_release_ring_tx
	 * for Beryllium. hence we can use wbm_release_ring directly.
	 */
	struct wbm_release_ring *wbm_rel_srng =
		(struct wbm_release_ring *)src_srng_desc;
	uint32_t addr_31_0;
	uint8_t addr_39_32;

	/* Structure copy !!! */
	wbm_rel_srng->released_buff_or_desc_addr_info =
		*((struct buffer_addr_info *)buf_addr_info);

	addr_31_0 =
		wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_31_0;
	addr_39_32 =
		wbm_rel_srng->released_buff_or_desc_addr_info.buffer_addr_39_32;

	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   RELEASE_SOURCE_MODULE, HAL_RX_WBM_ERR_SRC_SW);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING, BM_ACTION,
			   bm_action);
	HAL_DESC_SET_FIELD(src_srng_desc, HAL_SW2WBM_RELEASE_RING,
			   BUFFER_OR_DESC_TYPE,
			   HAL_RX_WBM_BUF_TYPE_MSDU_LINK_DESC);

	/* WBM error is indicated when any of the link descriptors given to
	 * WBM has a NULL address, and one those paths is the link descriptors
	 * released from host after processing RXDMA errors,
	 * or from Rx defrag path, and we want to add an assert here to ensure
	 * host is not releasing descriptors with NULL address.
	 */

	if (qdf_unlikely(!addr_31_0 && !addr_39_32)) {
		hal_dump_wbm_rel_desc(src_srng_desc);
		qdf_assert_always(0);
	}
}

/**
 * hal_rx_buf_cookie_rbm_get_be() - Get SW cookie and return buffer manager
 *				    from a buffer_addr_info
 * @buf_addr_info_hdl: pointer to the buffer_addr_info words
 * @buf_info_hdl: output hal_buf_info (sw_cookie and rbm filled in)
 *
 * Return: void
 */
static
void hal_rx_buf_cookie_rbm_get_be(uint32_t *buf_addr_info_hdl,
				  hal_buf_info_t buf_info_hdl)
{
	struct hal_buf_info *buf_info =
		(struct hal_buf_info *)buf_info_hdl;
	struct buffer_addr_info *buf_addr_info =
		(struct buffer_addr_info *)buf_addr_info_hdl;

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);
	/*
	 * buffer addr info is the first member of ring desc, so the typecast
	 * can be done.
	 */
	buf_info->rbm = hal_rx_ret_buf_manager_get_be(
						(hal_ring_desc_t)buf_addr_info);
}

/*
 * hal_rxdma_buff_addr_info_set_be() - set the buffer_addr_info of the
 *				       rxdma ring entry.
 * @rxdma_entry: descriptor entry
 * @paddr: physical address of nbuf data pointer.
 * @cookie: SW cookie used as a index to SW rx desc.
 * @manager: who owns the nbuf (host, NSS, etc...).
 *
 * Return: none
 */
static inline void
hal_rxdma_buff_addr_info_set_be(void *rxdma_entry,
				qdf_dma_addr_t paddr, uint32_t cookie,
				uint8_t manager)
{
	uint32_t paddr_lo = ((u64)paddr & 0x00000000ffffffff);
	uint32_t paddr_hi = ((u64)paddr & 0xffffffff00000000) >> 32;

	HAL_RXDMA_PADDR_LO_SET(rxdma_entry, paddr_lo);
	HAL_RXDMA_PADDR_HI_SET(rxdma_entry, paddr_hi);
	HAL_RXDMA_COOKIE_SET(rxdma_entry, cookie);
	HAL_RXDMA_MANAGER_SET(rxdma_entry, manager);
}

/**
 * hal_rx_get_reo_error_code_be() - Get REO error code from ring desc
 * @rx_desc: rx descriptor
 *
 * Return: REO error code
 */
static uint32_t hal_rx_get_reo_error_code_be(hal_ring_desc_t rx_desc)
{
	struct reo_destination_ring *reo_desc =
		(struct reo_destination_ring *)rx_desc;

	return HAL_RX_REO_ERROR_GET(reo_desc);
}

/**
 * hal_gen_reo_remap_val_generic_be() - Generate the reo map value
 * @remap_reg: which remap register (IX0 or IX2) the value is for
 * @ix0_map: array of 8 destination ring IDs to pack into the register
 *
 * Return: IX0/IX2 reo remap register value to be written
 */
static uint32_t
hal_gen_reo_remap_val_generic_be(enum hal_reo_remap_reg remap_reg,
				 uint8_t *ix0_map)
{
	uint32_t ix_val = 0;

	switch (remap_reg) {
	case HAL_REO_REMAP_REG_IX0:
		ix_val = HAL_REO_REMAP_IX0(ix0_map[0], 0) |
			HAL_REO_REMAP_IX0(ix0_map[1], 1) |
			HAL_REO_REMAP_IX0(ix0_map[2], 2) |
			HAL_REO_REMAP_IX0(ix0_map[3], 3) |
			HAL_REO_REMAP_IX0(ix0_map[4], 4) |
			HAL_REO_REMAP_IX0(ix0_map[5], 5) |
			HAL_REO_REMAP_IX0(ix0_map[6], 6) |
			HAL_REO_REMAP_IX0(ix0_map[7], 7);
		break;
	case HAL_REO_REMAP_REG_IX2:
		ix_val = HAL_REO_REMAP_IX2(ix0_map[0], 16) |
			HAL_REO_REMAP_IX2(ix0_map[1], 17) |
			HAL_REO_REMAP_IX2(ix0_map[2], 18) |
			HAL_REO_REMAP_IX2(ix0_map[3], 19) |
			HAL_REO_REMAP_IX2(ix0_map[4], 20) |
			HAL_REO_REMAP_IX2(ix0_map[5], 21) |
			HAL_REO_REMAP_IX2(ix0_map[6], 22) |
			HAL_REO_REMAP_IX2(ix0_map[7], 23);
		break;
	default:
		break;
	}

	return ix_val;
}

/**
 * hal_rx_err_status_get_be() - Get error status (push reason) from ring desc
 * @rx_desc: rx descriptor
 *
 * Return: error status
 */
static uint8_t hal_rx_err_status_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_ERROR_STATUS_GET(rx_desc);
}

/**
 * hal_reo_status_update_be() - Dispatch a REO status ring TLV to its parser
 * @hal_soc_hdl: HAL SOC handle
 * @reo_desc: REO status ring descriptor
 * @st_handle: union hal_reo_status to fill in
 * @tlv: TLV tag identifying which status the descriptor carries
 * @num_ref: output - command number from the parsed status header
 *
 * Return: QDF_STATUS_SUCCESS on a recognized TLV, QDF_STATUS_E_FAILURE
 *	   otherwise
 */
static QDF_STATUS hal_reo_status_update_be(hal_soc_handle_t hal_soc_hdl,
					   hal_ring_desc_t reo_desc,
					   void *st_handle,
					   uint32_t tlv, int *num_ref)
{
	union hal_reo_status *reo_status_ref;

	reo_status_ref = (union hal_reo_status *)st_handle;

	switch (tlv) {
	case HAL_REO_QUEUE_STATS_STATUS_TLV:
		hal_reo_queue_stats_status_be(reo_desc,
					      &reo_status_ref->queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
		hal_reo_flush_queue_status_be(reo_desc,
					      &reo_status_ref->fl_queue_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_queue_status.header.cmd_num;
		break;
	case HAL_REO_FLUSH_CACHE_STATUS_TLV:
		hal_reo_flush_cache_status_be(reo_desc,
					      &reo_status_ref->fl_cache_status,
					      hal_soc_hdl);
		*num_ref = reo_status_ref->fl_cache_status.header.cmd_num;
		break;
	case HAL_REO_UNBLK_CACHE_STATUS_TLV:
		hal_reo_unblock_cache_status_be
			(reo_desc, hal_soc_hdl,
			 &reo_status_ref->unblk_cache_status);
		*num_ref = reo_status_ref->unblk_cache_status.header.cmd_num;
		break;
	case HAL_REO_TIMOUT_LIST_STATUS_TLV:
		hal_reo_flush_timeout_list_status_be(
					reo_desc,
					&reo_status_ref->fl_timeout_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->fl_timeout_status.header.cmd_num;
		break;
	case HAL_REO_DESC_THRES_STATUS_TLV:
		hal_reo_desc_thres_reached_status_be(
						reo_desc,
						&reo_status_ref->thres_status,
						hal_soc_hdl);
		*num_ref = reo_status_ref->thres_status.header.cmd_num;
		break;
	case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
		hal_reo_rx_update_queue_status_be(
					reo_desc,
					&reo_status_ref->rx_queue_status,
					hal_soc_hdl);
		*num_ref = reo_status_ref->rx_queue_status.header.cmd_num;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP_REO, QDF_TRACE_LEVEL_WARN,
			  "hal_soc %pK: no handler for TLV:%d",
			  hal_soc_hdl, tlv);
		return QDF_STATUS_E_FAILURE;
	} /* switch */

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_rx_reo_buf_type_get_be() - Get buffer type (buffer vs link desc)
 *				  from a REO destination ring descriptor
 * @rx_desc: rx descriptor
 *
 * Return: REO buffer type
 */
static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
{
	return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}

#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/* spare_control[15] - enables HW cookie conversion done indication */
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
/**
 * hal_cookie_conversion_reg_cfg_be() - Program the HW cookie conversion
 *					(CC) registers in REO and WBM
 * @hal_soc_hdl: HAL SOC handle
 * @cc_cfg: cookie conversion configuration (LUT base address, enables,
 *	    page alignment and cookie bit positions)
 *
 * Return: none
 */
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
				      struct hal_hw_cc_config *cc_cfg)
{
	uint32_t reg_addr, reg_val = 0;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	/* REO CFG */
	reg_addr = HWIO_REO_R0_SW_COOKIE_CFG0_ADDR(REO_REG_REG_BASE);
	reg_val = cc_cfg->lut_base_addr_31_0;
	HAL_REG_WRITE(soc, reg_addr, reg_val);

	reg_addr = HWIO_REO_R0_SW_COOKIE_CFG1_ADDR(REO_REG_REG_BASE);
	reg_val = 0;
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  SW_COOKIE_CONVERT_GLOBAL_ENABLE,
			  cc_cfg->cc_global_en);
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  SW_COOKIE_CONVERT_ENABLE,
			  cc_cfg->cc_global_en);
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  PAGE_ALIGNMENT,
			  cc_cfg->page_4k_align);
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  COOKIE_OFFSET_MSB,
			  cc_cfg->cookie_offset_msb);
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  COOKIE_PAGE_MSB,
			  cc_cfg->cookie_page_msb);
	reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
			  CMEM_LUT_BASE_ADDR_39_32,
			  cc_cfg->lut_base_addr_39_32);
	HAL_REG_WRITE(soc, reg_addr, reg_val);

	/* WBM CFG */
	reg_addr = HWIO_WBM_R0_SW_COOKIE_CFG0_ADDR(WBM_REG_REG_BASE);
	reg_val = cc_cfg->lut_base_addr_31_0;
	HAL_REG_WRITE(soc, reg_addr, reg_val);

	reg_addr = HWIO_WBM_R0_SW_COOKIE_CFG1_ADDR(WBM_REG_REG_BASE);
	reg_val = 0;
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
			  PAGE_ALIGNMENT,
			  cc_cfg->page_4k_align);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
			  COOKIE_OFFSET_MSB,
			  cc_cfg->cookie_offset_msb);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
			  COOKIE_PAGE_MSB,
			  cc_cfg->cookie_page_msb);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
			  CMEM_LUT_BASE_ADDR_39_32,
			  cc_cfg->lut_base_addr_39_32);
	HAL_REG_WRITE(soc, reg_addr, reg_val);

	/*
	 * WCSS_UMAC_WBM_R0_SW_COOKIE_CONVERT_CFG default value is 0x1FE,
	 */
	reg_addr = HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG_ADDR(WBM_REG_REG_BASE);
	reg_val = 0;
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM_COOKIE_CONV_GLOBAL_ENABLE,
			  cc_cfg->cc_global_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW6_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw6_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW5_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw5_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW4_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw4_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW3_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw3_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW2_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw2_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW1_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw1_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2SW0_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2sw0_cc_en);
	reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
			  WBM2FW_COOKIE_CONVERSION_EN,
			  cc_cfg->wbm2fw_cc_en);
	HAL_REG_WRITE(soc, reg_addr, reg_val);

#ifdef HWIO_WBM_R0_WBM_CFG_2_COOKIE_DEBUG_SEL_BMSK
	reg_addr = HWIO_WBM_R0_WBM_CFG_2_ADDR(WBM_REG_REG_BASE);
	reg_val = 0;
	reg_val |= HAL_SM(HWIO_WBM_R0_WBM_CFG_2,
			  COOKIE_DEBUG_SEL,
			  cc_cfg->cc_global_en);

	reg_val |= HAL_SM(HWIO_WBM_R0_WBM_CFG_2,
			  COOKIE_CONV_INDICATION_EN,
			  cc_cfg->cc_global_en);

	reg_val |= HAL_SM(HWIO_WBM_R0_WBM_CFG_2,
			  ERROR_PATH_COOKIE_CONV_EN,
			  cc_cfg->error_path_cookie_conv_en);

	reg_val |= HAL_SM(HWIO_WBM_R0_WBM_CFG_2,
			  RELEASE_PATH_COOKIE_CONV_EN,
			  cc_cfg->release_path_cookie_conv_en);

	HAL_REG_WRITE(soc, reg_addr, reg_val);
#endif
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
	/*
	 * To enable indication for HW cookie conversion done or not for
	 * WBM, WCSS_UMAC_WBM_R0_MISC_CONTROL spare_control field 15th
	 * bit spare_control[15] should be set.
	 */
	reg_addr = HWIO_WBM_R0_MISC_CONTROL_ADDR(WBM_REG_REG_BASE);
	reg_val = HAL_REG_READ(soc, reg_addr);
	reg_val |= HAL_SM(HWIO_WCSS_UMAC_WBM_R0_MISC_CONTROL,
			  SPARE_CONTROL,
			  HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15);
	HAL_REG_WRITE(soc, reg_addr, reg_val);
#endif
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);

/**
 * hal_rx_msdu_reo_dst_ind_get_be() - Gets the REO
 * destination ring ID from the msdu desc info
 * @hal_soc_hdl: HAL SOC handle
 * @msdu_link_desc: Opaque cookie pointer used by HAL to get to
 * the current descriptor
 *
 * Reads the dst_ind field from the first MSDU of the link descriptor.
 *
 * Return: dst_ind (REO destination ring ID)
 */
static inline
uint32_t hal_rx_msdu_reo_dst_ind_get_be(hal_soc_handle_t hal_soc_hdl,
					void *msdu_link_desc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct rx_msdu_details *msdu_details;
	struct rx_msdu_desc_info *msdu_desc_info;
	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
	uint32_t dst_ind;

	msdu_details = hal_rx_link_desc_msdu0_ptr(msdu_link, hal_soc);

	/* The first msdu in the link should exist */
	msdu_desc_info = hal_rx_msdu_ext_desc_info_get_ptr(&msdu_details[0],
							   hal_soc);
	dst_ind = HAL_RX_MSDU_REO_DST_IND_GET(msdu_desc_info);
	return dst_ind;
}

/**
 * hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
 *		beryllium chipsets.
 * @hal_soc: HAL soc handle
 *
 * Return: None
 */
void hal_hw_txrx_default_ops_attach_be(struct hal_soc *hal_soc)
{
	hal_soc->ops->hal_get_reo_qdesc_size = hal_get_reo_qdesc_size_be;
	hal_soc->ops->hal_set_link_desc_addr = hal_set_link_desc_addr_be;
	hal_soc->ops->hal_tx_init_data_ring = hal_tx_init_data_ring_be;
	hal_soc->ops->hal_get_ba_aging_timeout = hal_get_ba_aging_timeout_be;
	hal_soc->ops->hal_set_ba_aging_timeout = hal_set_ba_aging_timeout_be;
	hal_soc->ops->hal_get_reo_reg_base_offset =
					hal_get_reo_reg_base_offset_be;
	hal_soc->ops->hal_setup_link_idle_list =
				hal_setup_link_idle_list_generic_be;
	hal_soc->ops->hal_reo_setup = hal_reo_setup_generic_be;

	hal_soc->ops->hal_rx_reo_buf_paddr_get = hal_rx_reo_buf_paddr_get_be;
	hal_soc->ops->hal_rx_msdu_link_desc_set = hal_rx_msdu_link_desc_set_be;
	hal_soc->ops->hal_rx_buf_cookie_rbm_get = hal_rx_buf_cookie_rbm_get_be;

	hal_soc->ops->hal_rx_ret_buf_manager_get =
						hal_rx_ret_buf_manager_get_be;
	hal_soc->ops->hal_rxdma_buff_addr_info_set =
					hal_rxdma_buff_addr_info_set_be;
	hal_soc->ops->hal_rx_msdu_flags_get = hal_rx_msdu_flags_get_be;
	hal_soc->ops->hal_rx_get_reo_error_code = hal_rx_get_reo_error_code_be;
	hal_soc->ops->hal_gen_reo_remap_val =
					hal_gen_reo_remap_val_generic_be;
	hal_soc->ops->hal_tx_comp_get_buffer_source =
					hal_tx_comp_get_buffer_source_generic_be;
	hal_soc->ops->hal_tx_comp_get_release_reason =
				hal_tx_comp_get_release_reason_generic_be;
	hal_soc->ops->hal_get_wbm_internal_error =
					hal_get_wbm_internal_error_generic_be;
	hal_soc->ops->hal_rx_mpdu_desc_info_get =
					hal_rx_mpdu_desc_info_get_be;
	hal_soc->ops->hal_rx_err_status_get = hal_rx_err_status_get_be;
	hal_soc->ops->hal_rx_reo_buf_type_get = hal_rx_reo_buf_type_get_be;
	hal_soc->ops->hal_rx_wbm_err_src_get = hal_rx_wbm_err_src_get_be;

	hal_soc->ops->hal_reo_send_cmd = hal_reo_send_cmd_be;
	hal_soc->ops->hal_reo_qdesc_setup = hal_reo_qdesc_setup_be;
	hal_soc->ops->hal_reo_status_update = hal_reo_status_update_be;
	hal_soc->ops->hal_get_tlv_hdr_size = hal_get_tlv_hdr_size_be;
	hal_soc->ops->hal_rx_msdu_reo_dst_ind_get =
					hal_rx_msdu_reo_dst_ind_get_be;
}