1 /* 2 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are 6 * met: 7 * * Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * * Redistributions in binary form must reproduce the above 10 * copyright notice, this list of conditions and the following 11 * disclaimer in the documentation and/or other materials provided 12 * with the distribution. 13 * * Neither the name of The Linux Foundation nor the names of its 14 * contributors may be used to endorse or promote products derived 15 * from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 26 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 27 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "hal_api.h"
#include "wcss_version.h"

/**
 * Common SRNG register access macros:
 * The SRNG registers are distributed across various UMAC and LMAC HW blocks,
 * but the register group and format is exactly same for all rings, with some
 * difference between producer rings (these are 'producer rings' with respect
 * to HW and referred as 'destination rings' in SW) and consumer rings (these
 * are 'consumer rings' with respect to HW and referred as 'source rings' in
 * SW). The following macros provide uniform access to all SRNG rings.
 */

/* SRNG registers are split among two groups R0 and R2 and following
 * definitions identify the group to which each register belongs to
 */
#define R0_INDEX 0
#define R2_INDEX 1

/* Token-paste a group name (R0/R2) into its hwreg_base[] index constant */
#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX

/* Registers in R0 group */
#define BASE_LSB_GROUP R0
#define BASE_MSB_GROUP R0
#define ID_GROUP R0
#define STATUS_GROUP R0
#define MISC_GROUP R0
#define HP_ADDR_LSB_GROUP R0
#define HP_ADDR_MSB_GROUP R0
#define PRODUCER_INT_SETUP_GROUP R0
#define PRODUCER_INT_STATUS_GROUP R0
#define PRODUCER_FULL_COUNTER_GROUP R0
#define MSI1_BASE_LSB_GROUP R0
#define MSI1_BASE_MSB_GROUP R0
#define MSI1_DATA_GROUP R0
#define HP_TP_SW_OFFSET_GROUP R0
#define TP_ADDR_LSB_GROUP R0
#define TP_ADDR_MSB_GROUP R0
#define CONSUMER_INT_SETUP_IX0_GROUP R0
#define CONSUMER_INT_SETUP_IX1_GROUP R0
#define CONSUMER_INT_STATUS_GROUP R0
#define CONSUMER_EMPTY_COUNTER_GROUP R0
#define CONSUMER_PREFETCH_TIMER_GROUP R0
#define CONSUMER_PREFETCH_STATUS_GROUP R0

/* Registers in R2 group */
#define HP_GROUP R2
#define TP_GROUP R2

/**
 * Register definitions for all SRNG based rings are same, except few
 * differences between source (HW consumer) and destination (HW producer)
 * registers. Following macros definitions provide generic access to all
 * SRNG based rings.
 * For source rings, we will use the register/field definitions of SW2TCL1
 * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To setup
 * individual fields, SRNG_SM macros should be used with fields specified
 * using SRNG_SRC_FLD(<register>, <field>), Register writes should be done
 * using SRNG_SRC_REG_WRITE(<hal_srng>, <register>, <value>).
 * Similarly for destination rings we will use definitions of REO2SW1 ring
 * defined in the register reo_destination_ring.h. To setup individual
 * fields SRNG_SM macros should be used with fields specified using
 * SRNG_DST_FLD(<register>, <field>). Register writes should be done using
 * SRNG_DST_REG_WRITE(<hal_srng>, <register>, <value>).
 */

/* Absolute address of <_reg> in the reference REO2SW1 (DST) ring instance */
#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0)

/* Absolute address of <_reg> in the reference SW2TCL1 (SRC) ring instance */
#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0)

#define _SRNG_DST_FLD(_reg_group, _reg_fld) \
	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld
#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \
	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld

#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \
	_SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld)

#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST)
#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC)

/* First register of each group in the reference rings; used as the base
 * from which per-register offsets within a group are computed
 */
#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0)
#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0)

#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2)
#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2)

#define SRNG_SRC_START_OFFSET(_reg_group) \
	SRNG_SRC_ ## _reg_group ## _START_OFFSET
#define SRNG_DST_START_OFFSET(_reg_group) \
	SRNG_DST_ ## _reg_group ## _START_OFFSET

/* Address of <_reg> for a specific ring: that ring's per-group base plus
 * the register's offset within the group (from the reference ring)
 */
#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \
	((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \
	SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \
	SRNG_ ## _dir ## _START_OFFSET(_reg_group))

#define SRNG_DST_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST)

#define SRNG_SRC_ADDR(_srng, _reg) \
	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC)

#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \
	hal_write_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value))

#define SRNG_REG_READ(_srng, _reg, _dir) \
	hal_read_address_32_mb(_srng->hal_soc, SRNG_ ## _dir ## _ADDR(_srng, _reg))

#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, SRC)

#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \
	SRNG_REG_WRITE(_srng, _reg, _value, DST)

#define SRNG_SRC_REG_READ(_srng, _reg) \
	SRNG_REG_READ(_srng, _reg, SRC)

/* Field mask (_BMSK) and shift (_SHFT) accessors for a register field */
#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK
#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT

/* Set-field: shift a value into its field position and mask to the field */
#define SRNG_SM(_reg_fld, _val) \
	(((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld))

/* Get-field: mask a register value and shift the field down to bit 0 */
#define SRNG_MS(_reg_fld, _val) \
	(((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld))

/* Largest ring size (in dwords) encodable in the BASE_MSB RING_SIZE field */
#define SRNG_MAX_SIZE_DWORDS \
	(SRNG_MS(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 0xffffffff))

/**
 * HW ring configuration table to identify hardware ring attributes like
 * register addresses, number of rings, ring entry size etc., for each type
 * of SRNG ring.
 *
 * Currently there is just one HW ring table, but there could be multiple
 * configurations in future based on HW variants from the same wifi3.0 family
 * and hence need to be attached with hal_soc based on HW type
 */
#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) (&hw_srng_table[_ring_type])
static struct hal_hw_srng_config hw_srng_table[] = {
	/* TODO: max_rings can populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2SW1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Per-ring register-block stride: distance between the
		 * REO2SW1 and REO2SW2 register blocks
		 */
		.reg_size = {
			HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) -
				HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0),
			HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0) -
				HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0),
		},
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_destination_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO2TCL_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_SW2REO_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET)
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_REO_CMD,
		.max_rings = 1,
		/* Each entry is a TLV header followed by the command body */
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct reo_get_queue_stats_status)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
			HWIO_REO_R2_REO_STATUS_RING_HP_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_data_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		.reg_size = {
			HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) -
				HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0),
			HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) -
				HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0),
		},
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_gse_cmd)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
			HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct tlv_32_hdr) +
			sizeof(struct tcl_status_ring)) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
			HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
			HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR(
				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_CE_0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_src_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* NOTE(review): reg_start uses the CHANNEL_DST/DEST_RING
		 * register macros combined with the CHANNEL_SRC block
		 * offsets — looks like a copy/paste from the CE_DST entry;
		 * confirm against the WFSS_CE CHANNEL_SRC R0/R2 SRC_RING
		 * register definitions.
		 */
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
		},
		/* Stride between consecutive CE channel register blocks */
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
		},
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_CE_0_DST,
		.max_rings = 12,
		.entry_size = 8 >> 2,
		/*TODO: entry_size above should actually be
		 * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition
		 * of struct ce_dst_desc in HW header files
		 */
		.lmac_ring = FALSE,
		/* NOTE(review): direction is SRC even though this is the CE
		 * destination ring — presumably because SW is the producer
		 * that posts RX buffers into it; confirm.
		 */
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_CE_0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct ce_stat_desc) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
		HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR(
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
		},
		/* TODO: check destination status ring registers */
		.reg_size = {
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
		SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
		},
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_SRC_RING,
		.reg_start = {
		HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		/* Single ring - provide ring size if multiple rings of this
		 * type are supported
		 */
		.reg_size = {},
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct wbm_release_ring) >> 2,
		.lmac_ring = FALSE,
		.ring_dir = HAL_SRNG_DST_RING,
		.reg_start = {
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
		.reg_size = {
		HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
		HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
		HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
		},
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		/* NOTE(review): max_rings is already 2 here; confirm whether
		 * the additional IPA buffer ring needs a third entry or will
		 * be set up by the IPA host driver.
		 */
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_DST_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
#ifdef WLAN_FEATURE_CIF_CFR
	{ /* WIFI_POS_SRC */
		.start_ring_id = HAL_SRNG_WIFI_POS_SRC_DMA_RING,
		.max_rings = 1,
		.entry_size = sizeof(wmi_oem_dma_buf_release_entry) >> 2,
		.lmac_ring = TRUE,
		.ring_dir = HAL_SRNG_SRC_RING,
		/* reg_start is not set because LMAC rings are not accessed
		 * from host
		 */
		.reg_start = {},
		.reg_size = {},
	},
#endif
};

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
		int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: ring_num exceeded maximum no. of supported rings\n",
			__func__);
		return -EINVAL;
	}

	/* LMAC ring ids are laid out per mac; offset by the mac's block */
	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

/* Return the hal_srng entry for @ring_id from the SOC's static srng list.
 * No bounds check: callers pass ids produced by hal_get_srng_ring_id().
 */
static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

/* Index within reg_start[]/reg_size[] of the R2 group (where HP lives) */
#define HP_OFFSET_IN_REG_START 1
/* Byte offset from a ring's HP register to its TP register */
#define OFFSET_FROM_HP_TO_TP 4

/* Point the ring's cached hp (SRC ring) or tp (DST ring) pointer at the
 * host-mapped shadow register slot @shadow_config_index
 */
static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	/* DST rings shadow the tail pointer, SRC rings the head pointer */
	if (srng->ring_dir == HAL_SRNG_DST_RING)
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
	else
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
}

/**
 * hal_set_one_shadow_config() - map the next free shadow register slot to
 * the HP (SRC ring) or TP (DST ring) register of the given ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_RESOURCES when all
 * MAX_SHADOW_REGISTERS slots are already in use
 */
QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	/* HP register of this ring instance (R2 base + per-ring stride) */
	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
				*ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure*/
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
			ring_num);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
		__func__, target_register, shadow_config_index,
ring_type, ring_num); 614 615 return QDF_STATUS_SUCCESS; 616 } 617 618 QDF_STATUS hal_construct_shadow_config(void *hal_soc) 619 { 620 int ring_type, ring_num; 621 622 for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) { 623 struct hal_hw_srng_config *srng_config = 624 &hw_srng_table[ring_type]; 625 626 if (ring_type == CE_SRC || 627 ring_type == CE_DST || 628 ring_type == CE_DST_STATUS) 629 continue; 630 631 if (srng_config->lmac_ring) 632 continue; 633 634 for (ring_num = 0; ring_num < srng_config->max_rings; 635 ring_num++) 636 hal_set_one_shadow_config(hal_soc, ring_type, ring_num); 637 } 638 639 return QDF_STATUS_SUCCESS; 640 } 641 642 void hal_get_shadow_config(void *hal_soc, 643 struct pld_shadow_reg_v2_cfg **shadow_config, 644 int *num_shadow_registers_configured) 645 { 646 struct hal_soc *hal = (struct hal_soc *)hal_soc; 647 648 *shadow_config = hal->shadow_config; 649 *num_shadow_registers_configured = 650 hal->num_shadow_registers_configured; 651 652 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 653 "%s\n", __func__); 654 } 655 656 657 static void hal_validate_shadow_register(struct hal_soc *hal, 658 uint32_t *destination, 659 uint32_t *shadow_address) 660 { 661 unsigned int index; 662 uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr; 663 int destination_ba_offset = 664 ((char *)destination) - (char *)hal->dev_base_addr; 665 666 index = shadow_address - shadow_0_offset; 667 668 if (index > MAX_SHADOW_REGISTERS) { 669 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 670 "%s: index %x out of bounds\n", __func__, index); 671 goto error; 672 } else if (hal->shadow_config[index].addr != destination_ba_offset) { 673 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 674 "%s: sanity check failure, expected %x, found %x\n", 675 __func__, destination_ba_offset, 676 hal->shadow_config[index].addr); 677 goto error; 678 } 679 return; 680 error: 681 qdf_print("%s: baddr %p, desination %p, shadow_address %p s0offset %p index 
%x", 682 __func__, hal->dev_base_addr, destination, shadow_address, 683 shadow_0_offset, index); 684 QDF_BUG(0); 685 return; 686 } 687 688 static void hal_target_based_configure(struct hal_soc *hal) 689 { 690 struct hif_target_info *tgt_info = 691 hif_get_target_info_handle(hal->hif_handle); 692 693 switch (tgt_info->target_type) { 694 case TARGET_TYPE_QCA6290: 695 hal->use_register_windowing = true; 696 break; 697 default: 698 break; 699 } 700 } 701 702 /** 703 * hal_attach - Initalize HAL layer 704 * @hif_handle: Opaque HIF handle 705 * @qdf_dev: QDF device 706 * 707 * Return: Opaque HAL SOC handle 708 * NULL on failure (if given ring is not available) 709 * 710 * This function should be called as part of HIF initialization (for accessing 711 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle() 712 * 713 */ 714 void *hal_attach(void *hif_handle, qdf_device_t qdf_dev) 715 { 716 struct hal_soc *hal; 717 int i; 718 719 hal = qdf_mem_malloc(sizeof(*hal)); 720 721 if (!hal) { 722 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 723 "%s: hal_soc allocation failed\n", __func__); 724 goto fail0; 725 } 726 hal->hif_handle = hif_handle; 727 hal->dev_base_addr = hif_get_dev_ba(hif_handle); 728 hal->qdf_dev = qdf_dev; 729 hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent( 730 qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) * 731 HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr)); 732 if (!hal->shadow_rdptr_mem_paddr) { 733 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 734 "%s: hal->shadow_rdptr_mem_paddr allocation failed\n", 735 __func__); 736 goto fail1; 737 } 738 739 hal->shadow_wrptr_mem_vaddr = 740 (uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev, 741 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 742 &(hal->shadow_wrptr_mem_paddr)); 743 if (!hal->shadow_wrptr_mem_vaddr) { 744 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 745 "%s: hal->shadow_wrptr_mem_vaddr allocation 
failed\n", 746 __func__); 747 goto fail2; 748 } 749 750 for (i = 0; i < HAL_SRNG_ID_MAX; i++) { 751 hal->srng_list[i].initialized = 0; 752 hal->srng_list[i].ring_id = i; 753 } 754 755 qdf_spinlock_create(&hal->register_access_lock); 756 hal->register_window = 0; 757 758 hal_target_based_configure(hal); 759 760 return (void *)hal; 761 762 fail2: 763 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, 764 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 765 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 766 fail1: 767 qdf_mem_free(hal); 768 fail0: 769 return NULL; 770 } 771 772 /** 773 * hal_mem_info - Retreive hal memory base address 774 * 775 * @hal_soc: Opaque HAL SOC handle 776 * @mem: pointer to structure to be updated with hal mem info 777 */ 778 void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem ) 779 { 780 struct hal_soc *hal = (struct hal_soc *)hal_soc; 781 mem->dev_base_addr = (void *)hal->dev_base_addr; 782 mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr; 783 mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr; 784 mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr; 785 mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr; 786 hif_read_phy_mem_base(hal->hif_handle, (qdf_dma_addr_t *)&mem->dev_base_paddr); 787 return; 788 } 789 790 /** 791 * hal_detach - Detach HAL layer 792 * @hal_soc: HAL SOC handle 793 * 794 * Return: Opaque HAL SOC handle 795 * NULL on failure (if given ring is not available) 796 * 797 * This function should be called as part of HIF initialization (for accessing 798 * copy engines). 
DP layer will get hal_soc handle using hif_get_hal_handle() 799 * 800 */ 801 extern void hal_detach(void *hal_soc) 802 { 803 struct hal_soc *hal = (struct hal_soc *)hal_soc; 804 805 qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, 806 sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX, 807 hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0); 808 qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev, 809 sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS, 810 hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0); 811 qdf_mem_free(hal); 812 813 return; 814 } 815 816 817 818 /** 819 * hal_srng_src_hw_init - Private function to initialize SRNG 820 * source ring HW 821 * @hal_soc: HAL SOC handle 822 * @srng: SRNG ring pointer 823 */ 824 static inline void hal_srng_src_hw_init(struct hal_soc *hal, 825 struct hal_srng *srng) 826 { 827 uint32_t reg_val = 0; 828 uint64_t tp_addr = 0; 829 830 HIF_INFO("%s: hw_init srng %d", __func__, srng->ring_id); 831 832 if (srng->flags & HAL_SRNG_MSI_INTR) { 833 SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB, 834 srng->msi_addr & 0xffffffff); 835 reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR), 836 (uint64_t)(srng->msi_addr) >> 32) | 837 SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, 838 MSI1_ENABLE), 1); 839 SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val); 840 SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data); 841 } 842 843 HIF_INFO("%s: hw_init srng (msi_end) %d", __func__, srng->ring_id); 844 845 846 SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff); 847 reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB), 848 ((uint64_t)(srng->ring_base_paddr) >> 32)) | 849 SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE), 850 srng->entry_size * srng->num_entries); 851 SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val); 852 853 #if defined(WCSS_VERSION) && \ 854 ((defined(CONFIG_WIN) && (WCSS_VERSION > 81)) || \ 855 (defined(CONFIG_MCL) && (WCSS_VERSION >= 72))) 856 reg_val = SRNG_SM(SRNG_SRC_FLD(ID, 
ENTRY_SIZE), srng->entry_size); 857 #else 858 reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) | 859 SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size); 860 #endif 861 SRNG_SRC_REG_WRITE(srng, ID, reg_val); 862 863 /** 864 * Interrupt setup: 865 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE 866 * if level mode is required 867 */ 868 reg_val = 0; 869 870 /* 871 * WAR - Hawkeye v1 has a hardware bug which requires timer value to be 872 * programmed in terms of 1us resolution instead of 8us resolution as 873 * given in MLD. 874 */ 875 if (srng->intr_timer_thres_us) { 876 reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0, 877 INTERRUPT_TIMER_THRESHOLD), 878 srng->intr_timer_thres_us); 879 /* For HK v2 this should be (srng->intr_timer_thres_us >> 3) */ 880 } 881 882 if (srng->intr_batch_cntr_thres_entries) { 883 reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0, 884 BATCH_COUNTER_THRESHOLD), 885 srng->intr_batch_cntr_thres_entries * 886 srng->entry_size); 887 } 888 SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val); 889 890 if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) { 891 reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1, 892 LOW_THRESHOLD), srng->u.src_ring.low_threshold); 893 } 894 895 SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val); 896 897 /* As per HW team, TP_ADDR and HP_ADDR for Idle link ring should 898 * remain 0 to avoid some WBM stability issues. 
Remote head/tail 899 * pointers are not required since this ring is completly managed 900 * by WBM HW */ 901 if (srng->ring_id != HAL_SRNG_WBM_IDLE_LINK) { 902 tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr + 903 ((unsigned long)(srng->u.src_ring.tp_addr) - 904 (unsigned long)(hal->shadow_rdptr_mem_vaddr))); 905 SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff); 906 SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32); 907 } 908 909 /* Initilaize head and tail pointers to indicate ring is empty */ 910 SRNG_SRC_REG_WRITE(srng, HP, 0); 911 SRNG_SRC_REG_WRITE(srng, TP, 0); 912 *(srng->u.src_ring.tp_addr) = 0; 913 914 reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ? 915 SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) | 916 ((srng->flags & HAL_SRNG_RING_PTR_SWAP) ? 917 SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) | 918 ((srng->flags & HAL_SRNG_MSI_SWAP) ? 919 SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0); 920 921 /* Loop count is not used for SRC rings */ 922 reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1); 923 924 /* 925 * reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, SRNG_ENABLE), 1); 926 * todo: update fw_api and replace with above line 927 * (when SRNG_ENABLE field for the MISC register is available in fw_api) 928 * (WCSS_UMAC_CE_0_SRC_WFSS_CE_CHANNEL_SRC_R0_SRC_RING_MISC) 929 */ 930 reg_val |= 0x40; 931 932 SRNG_SRC_REG_WRITE(srng, MISC, reg_val); 933 934 } 935 936 /** 937 * hal_ce_dst_setup - Initialize CE destination ring registers 938 * @hal_soc: HAL SOC handle 939 * @srng: SRNG ring pointer 940 */ 941 static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng, 942 int ring_num) 943 { 944 uint32_t reg_val = 0; 945 uint32_t reg_addr; 946 struct hal_hw_srng_config *ring_config = 947 HAL_SRNG_CONFIG(hal, CE_DST); 948 949 /* set DEST_MAX_LENGTH according to ce assignment */ 950 reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR( 951 ring_config->reg_start[R0_INDEX] + 952 (ring_num * 
			ring_config->reg_size[R0_INDEX]));

	/* Read-modify-write: only the DEST_MAX_LENGTH field is replaced */
	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Programs MSI, ring base, ring ID, producer interrupt thresholds,
 * remote head-pointer address and the MISC swap/enable bits, then
 * resets HP/TP so the ring starts out empty.
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	uint32_t reg_val = 0;
	uint64_t hp_addr = 0;

	HIF_INFO("%s: hw_init srng %d", __func__, srng->ring_id);

	if (srng->flags & HAL_SRNG_MSI_INTR) {
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
			srng->msi_addr & 0xffffffff);
		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
			(uint64_t)(srng->msi_addr) >> 32) |
			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
			MSI1_ENABLE), 1);
		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
	}

	HIF_INFO("%s: hw_init srng msi end %d", __func__, srng->ring_id);

	/* Ring base: 64-bit physical address split over LSB/MSB registers;
	 * RING_SIZE is programmed in dword units (entry_size * num_entries).
	 */
	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
		((uint64_t)(srng->ring_base_paddr) >> 32)) |
		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
		srng->entry_size * srng->num_entries);
	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);

	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
	SRNG_DST_REG_WRITE(srng, ID, reg_val);


	/*
	 * Interrupt setup:
	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
	 * if level mode is required.
	 */
	reg_val = 0;
	/* Timer threshold is programmed in 8us units (>> 3) here; the
	 * source-ring variant uses 1us units as a HK v1 WAR — see
	 * hal_srng_src_hw_init.
	 */
	if (srng->intr_timer_thres_us) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			INTERRUPT_TIMER_THRESHOLD),
			srng->intr_timer_thres_us >> 3);
	}

	/* Batch threshold in dword units: entries * entry_size */
	if (srng->intr_batch_cntr_thres_entries) {
		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
			BATCH_COUNTER_THRESHOLD),
			srng->intr_batch_cntr_thres_entries *
			srng->entry_size);
	}

	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);
	/* Physical address of the shadow head-pointer slot for this ring,
	 * derived from its offset within the shadow area.
	 */
	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
		((unsigned long)(srng->u.dst_ring.hp_addr) -
		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);

	/* Initialize head and tail pointers to indicate ring is empty */
	SRNG_DST_REG_WRITE(srng, HP, 0);
	SRNG_DST_REG_WRITE(srng, TP, 0);
	*(srng->u.dst_ring.hp_addr) = 0;

	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
		((srng->flags & HAL_SRNG_MSI_SWAP) ?
		SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);

	/*
	 * reg_val |= SRNG_SM(SRNG_DST_FLD(MISC, SRNG_ENABLE), 1);
	 * todo: update fw_api and replace with above line
	 * (when SRNG_ENABLE field for the MISC register is available in fw_api)
	 */
	/* 0x40 is presumably the SRNG_ENABLE bit described above — confirm
	 * against the register definition when fw_api is updated.
	 */
	reg_val |= 0x40;

	SRNG_DST_REG_WRITE(srng, MISC, reg_val);

}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 *
 * Dispatches to the source- or destination-ring initializer based on
 * the ring direction recorded in the SRNG state.
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

/* Shadow register policy: with CONFIG_SHADOW_V2 the shadow addresses
 * supplied by the caller are honoured (and validated); otherwise they are
 * ignored and the real ring registers are accessed directly.
 */
#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.

 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure.
Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	/* NOTE(review): informational print issued at ERROR trace level —
	 * consider demoting.
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: mac_id %d ring_id %d\n",
		__func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		/* NOTE(review): the message does not actually format the
		 * ring_type/ring_num values.
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Ring (ring_type, ring_num) already initialized\n",
			__func__);
		return NULL;
	}

	/* Populate SW ring state from the static per-type config and the
	 * caller-supplied parameters.
	 */
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	/* NOTE(review): mask arithmetic assumes ring_size is a power of 2 —
	 * TODO confirm num_entries/entry_size constraints guarantee this.
	 */
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	/* Per-ring register base for each register group (R0, R2) */
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory; sizes are in dwords, hence << 2
	 * to get bytes.
	 */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should we get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		/* Reap head starts one entry behind so the ring is "full"
		 * from the reap perspective until HW consumes entries.
		 */
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		/* low_threshold is converted to dword units here */
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		/* NOTE(review): hp_addr is read here before this function
		 * assigns it — relies on it being zeroed/persisted from SOC
		 * attach or a previous setup; confirm.
		 */
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_SRC_ADDR(srng, HP),
				srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		/* NOTE(review): same read-before-set pattern as the source
		 * ring tp_addr check above — confirm tp_addr's prior value.
		 */
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"%s: Ring (%d, %d) missing shadow config\n",
					__func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
				SRNG_DST_ADDR(srng, TP),
				srng->u.dst_ring.tp_addr);
		}
	}

	/* LMAC ring registers are programmed by FW, not by the host */
	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length = ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	return (void *)srng;
}

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
1235 * @hal_soc: Opaque HAL SOC handle 1236 * @hal_srng: Opaque HAL SRNG pointer 1237 */ 1238 void hal_srng_cleanup(void *hal_soc, void *hal_srng) 1239 { 1240 struct hal_srng *srng = (struct hal_srng *)hal_srng; 1241 SRNG_LOCK_DESTROY(&srng->lock); 1242 srng->initialized = 0; 1243 } 1244 1245 /** 1246 * hal_srng_get_entrysize - Returns size of ring entry in bytes 1247 * @hal_soc: Opaque HAL SOC handle 1248 * @ring_type: one of the types from hal_ring_type 1249 * 1250 */ 1251 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type) 1252 { 1253 struct hal_hw_srng_config *ring_config = 1254 HAL_SRNG_CONFIG(hal, ring_type); 1255 return ring_config->entry_size << 2; 1256 } 1257 1258 /** 1259 * hal_srng_max_entries - Returns maximum possible number of ring entries 1260 * @hal_soc: Opaque HAL SOC handle 1261 * @ring_type: one of the types from hal_ring_type 1262 * 1263 * Return: Maximum number of entries for the given ring_type 1264 */ 1265 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type) 1266 { 1267 struct hal_hw_srng_config *ring_config = HAL_SRNG_CONFIG(hal, ring_type); 1268 return SRNG_MAX_SIZE_DWORDS / ring_config->entry_size; 1269 } 1270 1271 /** 1272 * hal_get_srng_params - Retreive SRNG parameters for a given ring from HAL 1273 * 1274 * @hal_soc: Opaque HAL SOC handle 1275 * @hal_ring: Ring pointer (Source or Destination ring) 1276 * @ring_params: SRNG parameters will be returned through this structure 1277 */ 1278 extern void hal_get_srng_params(void *hal_soc, void *hal_ring, 1279 struct hal_srng_params *ring_params) 1280 { 1281 struct hal_srng *srng = (struct hal_srng *)hal_ring; 1282 int i =0; 1283 ring_params->ring_id = srng->ring_id; 1284 ring_params->ring_dir = srng->ring_dir; 1285 ring_params->entry_size = srng->entry_size; 1286 1287 ring_params->ring_base_paddr = srng->ring_base_paddr; 1288 ring_params->ring_base_vaddr = srng->ring_base_vaddr; 1289 ring_params->num_entries = srng->num_entries; 1290 ring_params->msi_addr = srng->msi_addr; 
1291 ring_params->msi_data = srng->msi_data; 1292 ring_params->intr_timer_thres_us = srng->intr_timer_thres_us; 1293 ring_params->intr_batch_cntr_thres_entries = 1294 srng->intr_batch_cntr_thres_entries; 1295 ring_params->low_threshold = srng->u.src_ring.low_threshold; 1296 ring_params->flags = srng->flags; 1297 ring_params->ring_id = srng->ring_id; 1298 for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) 1299 ring_params->hwreg_base[i] = srng->hwreg_base[i]; 1300 } 1301