1 /* 2 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are 6 * met: 7 * * Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * * Redistributions in binary form must reproduce the above 10 * copyright notice, this list of conditions and the following 11 * disclaimer in the documentation and/or other materials provided 12 * with the distribution. 13 * * Neither the name of The Linux Foundation nor the names of its 14 * contributors may be used to endorse or promote products derived 15 * from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 27 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "hal_api.h"
#include "target_type.h"
#include "wcss_version.h"
#include "qdf_module.h"

/**
 * Common SRNG register access macros:
 * The SRNG registers are distributed across various UMAC and LMAC HW blocks,
 * but the register group and format is exactly same for all rings, with some
 * difference between producer rings (these are 'producer rings' with respect
 * to HW and referred as 'destination rings' in SW) and consumer rings (these
 * are 'consumer rings' with respect to HW and referred as 'source rings' in SW).
 * The following macros provide uniform access to all SRNG rings.
 */

/* SRNG registers are split among two groups R0 and R2 and following
 * definitions identify the group to which each register belongs to
 */
#define R0_INDEX 0
#define R2_INDEX 1

/* Token-paste a register group name (R0/R2) into its hwreg_base[] index */
#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX

/* Registers in R0 group */
#define BASE_LSB_GROUP R0
#define BASE_MSB_GROUP R0
#define ID_GROUP R0
#define STATUS_GROUP R0
#define MISC_GROUP R0
#define HP_ADDR_LSB_GROUP R0
#define HP_ADDR_MSB_GROUP R0
#define PRODUCER_INT_SETUP_GROUP R0
#define PRODUCER_INT_STATUS_GROUP R0
#define PRODUCER_FULL_COUNTER_GROUP R0
#define MSI1_BASE_LSB_GROUP R0
#define MSI1_BASE_MSB_GROUP R0
#define MSI1_DATA_GROUP R0
#define HP_TP_SW_OFFSET_GROUP R0
#define TP_ADDR_LSB_GROUP R0
#define TP_ADDR_MSB_GROUP R0
#define CONSUMER_INT_SETUP_IX0_GROUP R0
#define CONSUMER_INT_SETUP_IX1_GROUP R0
#define CONSUMER_INT_STATUS_GROUP R0
#define CONSUMER_EMPTY_COUNTER_GROUP R0
#define CONSUMER_PREFETCH_TIMER_GROUP R0
#define CONSUMER_PREFETCH_STATUS_GROUP R0

/* Registers in R2 group */
#define HP_GROUP R2
#define TP_GROUP R2

/**
 * Register definitions for all SRNG based rings are same, except few
 * differences between source (HW consumer) and destination (HW producer)
 * registers. Following macros definitions provide generic access to all
 * SRNG based rings.
 * For source rings, we will use the register/field definitions of SW2TCL1
 * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To setup
 * individual fields, SRNG_SM macros should be used with fields specified
 * using SRNG_SRC_FLD(<register>, <field>), Register writes should be done
 * using SRNG_SRC_REG_WRITE(<hal_srng>, <register>, <value>).
 * Similarly for destination rings we will use definitions of REO2SW1 ring
 * defined in the register reo_destination_ring.h. To setup individual
 * fields SRNG_SM macros should be used with fields specified using
 * SRNG_DST_FLD(<register>, <field>). Register writes should be done using
 * SRNG_DST_REG_WRITE(<hal_srng>, <register>, <value>).
 */

/* Absolute address of a destination-ring register, using REO2SW1 as the
 * template ring (argument 0 = block base offset of 0)
 */
#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0)

/* Absolute address of a source-ring register, using SW2TCL1 as template */
#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0)

/* Field (BMSK/SHFT) name construction for destination/source rings */
#define _SRNG_DST_FLD(_reg_group, _reg_fld) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld
#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld

#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \
	_SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld)

#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST)
#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC)

/* First register of each group; ring register offsets are computed relative
 * to these so they can be rebased on a ring's own hwreg_base[]
 */
#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0)
#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0)

#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2)
#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2)

#define SRNG_SRC_START_OFFSET(_reg_group) \
	SRNG_SRC_ ## _reg_group ## _START_OFFSET
#define SRNG_DST_START_OFFSET(_reg_group) \
	SRNG_DST_ ## _reg_group ## _START_OFFSET

/* Final register address = ring's group base + (template register address -
 * template group start), i.e. the register's offset within its group
 */
#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \
	((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \
	SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \
	SRNG_ ## _dir ## _START_OFFSET(_reg_group))

#define SRNG_DST_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST)

#define SRNG_SRC_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC)

#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \
	hal_write_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value))

#define SRNG_REG_READ(_srng, _reg, _dir) \
	hal_read_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg))

#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, SRC)

#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, DST)

#define SRNG_SRC_REG_READ(_srng, _reg) \
	SRNG_REG_READ(_srng, _reg, SRC)

/* Field mask and field shift for a fully-expanded field name */
#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK
#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT

/* Set (shift-and-mask) a field value into register position */
#define SRNG_SM(_reg_fld, _val) \
	(((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld))

/* Mask-and-shift (extract) a field value out of a register value */
#define SRNG_MS(_reg_fld, _val) \
	(((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld))

/* Maximum ring size in dwords = all-ones masked through the RING_SIZE field */
#define SRNG_MAX_SIZE_DWORDS \
	(SRNG_MS(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 0xffffffff))

#define HAL_RXDMA_MAX_RING_SIZE 0xFFFF
/**
 * HW ring configuration table to identify hardware ring attributes like
 * register addresses, number of rings, ring entry size etc., for each type
 * of SRNG ring.
 *
 * Currently there is just one HW ring table, but there could be multiple
 * configurations in future based on HW variants from the same wifi3.0 family
 * and hence need to be attached with hal_soc based on HW type
 */
#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) (&hw_srng_table[_ring_type])
static struct hal_hw_srng_config hw_srng_table[] = {
	/* TODO: max_rings can populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_REO2SW1,
		.max_rings = 4,
		/* entry_size is in 4-byte (dword) units for all entries */
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start[0] = first R0 register, reg_start[1] = first R2
		 * register; reg_size[] = register stride between two
		 * consecutive rings of this type in each group
		 */
		.reg_start = {
			HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2SW1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		.reg_size = {
			HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) -
				HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0),
			HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) -
				HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0),
		},
		/* max_size = maximum value representable in the RING_SIZE
		 * field of the BASE_MSB register
		 */
		.max_size = HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_REO_R0_REO2SW1_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2TCL_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size = HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_REO_R0_REO2TCL_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_SW2REO_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size = HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_REO_R0_SW2REO_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_REO_CMD,
		.max_rings = 1,
		/* Each command entry carries a TLV header ahead of the
		 * command body
		 */
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size = HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_REO_R0_REO_CMD_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats_status)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_STATUS_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size =
		HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_REO_R0_REO_STATUS_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_data_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		.reg_size = {
			HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) -
				HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0),
			HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) -
				HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0),
		},
		.max_size = HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_TCL_R0_SW2TCL1_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_gse_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size =
		HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_status_ring)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size =
		HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_BMSK >>
			HWIO_TCL_R0_TCL_STATUS1_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_CE_0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_src_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* NOTE(review): CHANNEL_DST register macros are used here
		 * with the CHANNEL_SRC block offsets, as in the original
		 */
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
		},
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
		},
		.max_size =
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_CE_0_DST,
		.max_rings = 12,
		.entry_size = 8 >> 2,
		/*TODO: entry_size above should actually be
		 * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition
		 * of struct ce_dst_desc in HW header files
		 */
		.lmac_ring = FALSE,
		/* SW produces buffers into this ring, hence a source ring */
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
		.max_size =
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_CE_0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_stat_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		/* TODO: check destination status ring registers */
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
		.max_size =
		HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size =
		HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported */
		.reg_size = {},
		.max_size =
		HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WBM_R0_SW_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		.reg_size = {
		HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
		HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		.max_size =
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_BMSK >>
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_MSB_RING_SIZE_SHFT,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0,
#ifdef IPA_OFFLOAD
		.max_rings = 3,
#else
		.max_rings = 2,
#endif
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
	{ /* DIR_BUF_RX_DMA_SRC */
		.start_ring_id = HAL_SRNG_DIR_BUF_RX_SRC_DMA_RING,
		.max_rings = 1,
		/* fixed 8-byte (2 dword) entries */
		.entry_size = 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
#ifdef WLAN_FEATURE_CIF_CFR
	{ /* WIFI_POS_SRC */
		.start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING,
		.max_rings = 1,
		.entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
		.max_size = HAL_RXDMA_MAX_RING_SIZE,
	},
#endif
};

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ring_num exceeded maximum no. of supported rings\n",
			__func__);
		/* TODO: This is a programming error.
		 * Assert if this happens */
		return -EINVAL;
	}

	/* LMAC rings are replicated per MAC; step the id by the per-LMAC
	 * ring-id stride for the requested mac_id
	 */
	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

/**
 * hal_get_srng() - return the statically allocated SRNG descriptor for an id
 * @hal: hal_soc data structure
 * @ring_id: ring id returned by hal_get_srng_ring_id()
 *
 * Return: pointer into hal->srng_list[]; never NULL for a valid id
 */
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

/* Index of the HP (R2 group) entry within reg_start[]/reg_size[] */
#define HP_OFFSET_IN_REG_START 1
/* Byte distance from a ring's HP register to its TP register */
#define OFFSET_FROM_HP_TO_TP 4
/**
 * hal_update_srng_hp_tp_address() - point a ring's cached hp/tp address at
 *	its shadow register
 * @hal_soc: opaque HAL SOC handle
 * @shadow_config_index: index of the shadow register assigned to the ring
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Destination rings shadow the tail pointer; source rings shadow the head
 * pointer (mirrors the choice made in hal_set_one_shadow_config()).
 */
static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->ring_dir == HAL_SRNG_DST_RING)
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
	else
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
}

/**
 * hal_set_one_shadow_config() - assign the next free shadow register to one
 *	ring's HP (source ring) or TP (destination ring) register
 * @hal_soc: opaque HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES when all
 *	MAX_SHADOW_REGISTERS entries are already in use.
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	/* HP register of ring 0, stepped by the per-ring register stride */
	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
				*ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
		__func__, target_register, shadow_config_index,
		ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_construct_shadow_config() - assign shadow registers to every
 *	host-accessed UMAC ring
 * @hal_soc: opaque HAL SOC handle
 *
 * Skips CE rings (shadowed elsewhere - TODO confirm) and LMAC rings
 * (not accessed from host, per hw_srng_table).
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * hal_get_shadow_config() - export the shadow register table to the caller
 * @hal_soc: opaque HAL SOC handle
 * @shadow_config: out - pointer to the internal shadow config array
 * @num_shadow_registers_configured: out - number of valid entries
 */
void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;

	/* NOTE(review): informational trace emitted at ERROR level -
	 * confirm whether INFO was intended
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s\n", __func__);
}

/**
 * hal_validate_shadow_register() - sanity-check that a shadow register maps
 *	to the expected target register
 * @hal: hal_soc data structure
 * @destination: target register address the shadow should mirror
 * @shadow_address: shadow register address to validate
 *
 * Logs and asserts (QDF_BUG) on mismatch; returns silently on success.
 */
static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	/* offset of the target register from the device base address */
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: index %x out of bounds\n", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: sanity check failure, expected %x, found %x\n",
			__func__, destination_ba_offset,
			hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	qdf_print("%s: baddr %pK, desination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return;
}

/**
 * hal_target_based_configure() - apply per-target HAL quirks
 * @hal: hal_soc data structure
 *
 * Currently only enables register windowing for QCA6290.
 */
static void hal_target_based_configure(struct hal_soc *hal)
{
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal->hif_handle);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		break;
	default:
		break;
	}
}

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		NULL on failure (if given ring is not available)
 *
 * This function should be called as part of HIF initialization (for accessing
DP layer will get hal_soc handle using hif_get_hal_handle() 768 * 769 */ 770 void *hal_attach(void *hif_handle, qdf_device_t qdf_dev) 771 { 772 struct hal_soc *hal; 773 int i; 774 775 hal = qdf_mem_malloc(sizeof(*hal)); 776 777 if (!hal) { 778 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 779 "%s: hal_soc allocation failed\n", __func__); 780 goto fail0; 781 } 782 hal->hif_handle = hif_handle; 783 hal->dev_base_addr = hif_get_dev_ba(hif_handle); 784 hal->qdf_dev = qdf_dev; 785 hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( 786 qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 787 HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); 788 if (!hal->shadow_rdptr_mem_paddr) { 789 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 790 "%s: hal->shadow_rdptr_mem_paddr allocation failed\n", 791 __func__); 792 goto fail1; 793 } 794 795 hal->shadow_wrptr_mem_vaddr = 796 (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, 797 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 798 &(hal->shadow_wrptr_mem_paddr)); 799 if (!hal->shadow_wrptr_mem_vaddr) { 800 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 801 "%s: hal->shadow_wrptr_mem_vaddr allocation failed\n", 802 __func__); 803 goto fail2; 804 } 805 806 for (i = 0; i < HAL_SRNG_ID_MAX; i++) { 807 hal->srng_list[i].initialized = 0; 808 hal->srng_list[i].ring_id = i; 809 } 810 811 qdf_spinlock_create(&hal->register_access_lock); 812 hal->register_window = 0; 813 814 hal_target_based_configure(hal); 815 816 return (void *)hal; 817 818 fail2: 819 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 820 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 821 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 822 fail1: 823 qdf_mem_free(hal); 824 fail0: 825 return NULL; 826 } 827 qdf_export_symbol(hal_attach); 828 829 /** 830 * hal_mem_info - Retrieve hal memory base address 831 * 832 * @hal_soc: Opaque HAL SOC handle 833 * @mem: pointer to structure to be 
 *	updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	/* Copy out the device base address and the shadow rd/wr pointer
	 * memory addresses (both virtual and DMA) allocated in hal_attach()
	 */
	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base(hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	return;
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * Frees the consistent (DMA) memory allocated for the shadow read and write
 * pointers in hal_attach() and then frees the hal_soc structure itself.
 *
 * Return: none
 */
extern void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);

	return;
}
qdf_export_symbol(hal_detach);

/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t tp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	/* Program MSI address/data only when MSI interrupts are enabled
	 * for this ring
	 */
	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	/* Ring base address split across LSB/MSB; ring size in 32-bit words */
	SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val);

	/* On these WCSS versions only ENTRY_SIZE is programmed in the ID
	 * register (no RING_ID field)
	 */
#if defined(WCSS_VERSION) && \
	((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \
	(defined(CONFIG_MCL) && (WCSS_VERSION >= 72)))
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#else
	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
#endif
	SRNG_SRC_REG_WRITE(srng, ID, reg_val);

	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;

	/*
	 * WAR - Hawkeye v1 has a hardware bug which requires timer value to be
	 * programmed in terms of 1us resolution instead of 8us resolution as
	 * given in MLD.
	 */
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us);
		/* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */
	}

	if (srng->intr_batch_cntr_thres_entries) {
		/* Batch threshold is programmed in words, not entries */
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}
	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);

	/* Low-threshold interrupt is optional and off unless flagged */
	reg_val = 0;
	if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
			LOW_THRESHOLD), srng->u.src_ring.low_threshold);
	}

	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);

	/* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should
	 * remain 0 to avoid some WBM stability issues. Remote head/tail
	 * pointers are not required since this ring is completely managed
	 * by WBM HW
	 */
	if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) {
		/* Physical address of this ring's slot inside the shadow
		 * read-pointer area (vaddr offset applied to paddr base)
		 */
		tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
		SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_SRC_REG_WRITE(srng, HP, 0);
	SRNG_SRC_REG_WRITE(srng, TP, 0);
	*(srng->u.src_ring.tp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/* Loop count is not used for SRC rings */
	reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;	/* hard-coded SRNG_ENABLE bit, see todo above */

	SRNG_SRC_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: CE ring number, used to locate the per-ring register block
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
		int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	/* Read-modify-write: only the DEST_MAX_LENGTH field is replaced */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value to program into REO DESTINATION_RING_CTRL_IX_0
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
		SEQ_WCSS_UMAC_REO_REG_OFFSET);
	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: sring pointer
 * @paddr: physical address
 */
void
hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			  uint64_t paddr)
{
	/* 64-bit shadow HP address is split across the LSB/MSB registers */
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: sring pointer
 * @vaddr: virtual address of the new shadow head pointer location
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	srng->u.dst_ring.hp_addr = vaddr;
	/* Seed both the HW register and the shadow slot from the cached HP */
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);
	*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

	/* NOTE(review): informational trace emitted at ERROR level — confirm
	 * whether INFO/DEBUG was intended.
	 */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "hp_addr=%pK, cached_hp=%d, hp=%d\n",
		  (void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
		  *(srng->u.dst_ring.hp_addr));
}

/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t hp_addr = 0;

	HIF_DBG("%s: hw_init srng %d", __func__, srng->ring_id);

	/* Program MSI address/data only when MSI interrupts are enabled */
	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	/* Ring base address split across LSB/MSB; ring size in 32-bit words */
	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
	SRNG_DST_REG_WRITE(srng, ID, reg_val);


	/**
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required
	 */
	reg_val = 0;
	if (srng->intr_timer_thres_us) {
		/* Unlike the SRC path, the DST timer threshold is programmed
		 * in 8us units (hence the >> 3)
		 */
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us >> 3);
	}

	if (srng->intr_batch_cntr_thres_entries) {
		/* Batch threshold is programmed in words, not entries */
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}

	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);
	/* Physical address of this ring's slot inside the shadow
	 * read-pointer area (vaddr offset applied to paddr base)
	 */
	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
		((unsigned long)(srng->u.dst_ring.hp_addr) -
		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_DST_REG_WRITE(srng, HP, 0);
	SRNG_DST_REG_WRITE(srng, TP, 0);
	*(srng->u.dst_ring.hp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/*
	 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC)
	 */
	reg_val |= 0x40;	/* hard-coded SRNG_ENABLE bit, see todo above */

	SRNG_DST_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the source- or destination-specific init based on the
 * ring direction recorded in SW state.
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* Without CONFIG_SHADOW_V2 the shadow HP/TP addresses are ignored and the
 * real ring registers are used directly; with it, missing shadow
 * configuration is reported and existing configuration is validated.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure.
Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	/* Map (type, num, mac) to the global ring id; negative means the
	 * requested ring does not exist
	 */
	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: mac_id %d ring_id %d\n",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	/* Reject double initialization of the same ring */
	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring (ring_type, ring_num) already initialized\n",
			__func__);
		return NULL;
	}

	/* Cache ring geometry and interrupt parameters in SW ring state */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* Per-ring register base for each register group (R0/R2) */
	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory (sizes are in 32-bit words) */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
			srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		/* Remote TP lives in the per-ring shadow read-pointer slot */
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		/* low_threshold is stored in words */
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			/* Fall back to writing the real HP register */
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_SRC_ADDR(srng, HP),
				srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		/* Remote HP lives in the per-ring shadow read-pointer slot */
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			/* Fall back to writing the real TP register */
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_DST_ADDR(srng, TP),
				srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC rings skip the register init here (per the FW notes above) */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
1335 * @hal_soc: Opaque HAL SOC handle 1336 * @hal_srng: Opaque HAL SRNG pointer 1337 */ 1338 void hal_srng_cleanup(void *hal_soc, void *hal_srng) 1339 { 1340 struct hal_srng *srng = (struct hal_srng *)hal_srng; 1341 SRNG_LOCK_DESTROY(&srng->lock); 1342 srng->initialized = 0; 1343 } 1344 qdf_export_symbol(hal_srng_cleanup); 1345 1346 /** 1347 * hal_srng_get_entrysize - Returns size of ring entry in bytes 1348 * @hal_soc: Opaque HAL SOC handle 1349 * @ring_type: one of the types from hal_ring_type 1350 * 1351 */ 1352 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 1353 { 1354 struct hal_hw_srng_config *ring_config = 1355 HAL_SRNG_CONFIG(hal, ring_type); 1356 return ring_config->entry_size << 2; 1357 } 1358 qdf_export_symbol(hal_srng_get_entrysize); 1359 1360 /** 1361 * hal_srng_max_entries - Returns maximum possible number of ring entries 1362 * @hal_soc: Opaque HAL SOC handle 1363 * @ring_type: one of the types from hal_ring_type 1364 * 1365 * Return: Maximum number of entries for the given ring_type 1366 */ 1367 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 1368 { 1369 struct hal_hw_srng_config *ring_config = 1370 HAL_SRNG_CONFIG(hal, ring_type); 1371 1372 return ring_config->max_size / ring_config->entry_size; 1373 } 1374 qdf_export_symbol(hal_srng_max_entries); 1375 1376 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type) 1377 { 1378 struct hal_hw_srng_config *ring_config = 1379 HAL_SRNG_CONFIG(hal, ring_type); 1380 1381 return ring_config->ring_dir; 1382 } 1383 1384 /** 1385 * hal_srng_dump - Dump ring status 1386 * @srng: hal srng pointer 1387 */ 1388 void hal_srng_dump(struct hal_srng *srng) 1389 { 1390 if (srng->ring_dir == HAL_SRNG_SRC_RING) { 1391 qdf_print("=== SRC RING %d ===", srng->ring_id); 1392 qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u", 1393 srng->u.src_ring.hp, 1394 srng->u.src_ring.reap_hp, 1395 *srng->u.src_ring.tp_addr, 1396 srng->u.src_ring.cached_tp); 1397 } else { 1398 qdf_print("=== DST RING 
%d ===", srng->ring_id); 1399 qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u", 1400 srng->u.dst_ring.tp, 1401 *srng->u.dst_ring.hp_addr, 1402 srng->u.dst_ring.cached_hp, 1403 srng->u.dst_ring.loop_cnt); 1404 } 1405 } 1406 1407 /** 1408 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL 1409 * 1410 * @hal_soc: Opaque HAL SOC handle 1411 * @hal_ring: Ring pointer (Source or Destination ring) 1412 * @ring_params: SRNG parameters will be returned through this structure 1413 */ 1414 extern void hal_get_srng_params(void *hal_soc, void *hal_ring, 1415 struct hal_srng_params *ring_params) 1416 { 1417 struct hal_srng *srng = (struct hal_srng *)hal_ring; 1418 int i =0; 1419 ring_params->ring_id = srng->ring_id; 1420 ring_params->ring_dir = srng->ring_dir; 1421 ring_params->entry_size = srng->entry_size; 1422 1423 ring_params->ring_base_paddr = srng->ring_base_paddr; 1424 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 1425 ring_params->num_entries = srng->num_entries; 1426 ring_params->msi_addr = srng->msi_addr; 1427 ring_params->msi_data = srng->msi_data; 1428 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 1429 ring_params->intr_batch_cntr_thres_entries = 1430 srng->intr_batch_cntr_thres_entries; 1431 ring_params->low_threshold = srng->u.src_ring.low_threshold; 1432 ring_params->flags = srng->flags; 1433 ring_params->ring_id = srng->ring_id; 1434 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 1435 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 1436 } 1437 qdf_export_symbol(hal_get_srng_params); 1438