1 /* 2 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /** 21 * DOC: if_snoc.c 22 * 23 * c file for snoc specific implementations. 
24 */ 25 26 #include "hif.h" 27 #include "hif_main.h" 28 #include "hif_debug.h" 29 #include "hif_io32.h" 30 #include "ce_main.h" 31 #include "ce_tasklet.h" 32 #include "ce_api.h" 33 #include "ce_internal.h" 34 #include "snoc_api.h" 35 #include "pld_common.h" 36 #include "qdf_util.h" 37 #ifdef IPA_OFFLOAD 38 #include <uapi/linux/msm_ipa.h> 39 #endif 40 #include "target_type.h" 41 42 /** 43 * hif_snoc_disable_isr(): disable isr 44 * @scn: struct hif_softc 45 * 46 * This function disables isr and kills tasklets 47 * 48 * Return: void 49 */ 50 void hif_snoc_disable_isr(struct hif_softc *scn) 51 { 52 hif_exec_kill(&scn->osc); 53 hif_nointrs(scn); 54 ce_tasklet_kill(scn); 55 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 56 qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0); 57 } 58 59 /** 60 * hif_snoc_dump_registers(): dump bus debug registers 61 * @hif_ctx: struct hif_opaque_softc 62 * 63 * This function dumps hif bus debug registers 64 * 65 * Return: 0 for success or error code 66 */ 67 int hif_snoc_dump_registers(struct hif_softc *hif_ctx) 68 { 69 int status; 70 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 71 72 status = hif_dump_ce_registers(scn); 73 if (status) 74 hif_err("Dump CE Registers Failed"); 75 76 return 0; 77 } 78 79 void hif_snoc_display_stats(struct hif_softc *hif_ctx) 80 { 81 if (!hif_ctx) { 82 hif_err("hif_ctx null"); 83 return; 84 } 85 hif_display_ce_stats(hif_ctx); 86 } 87 88 void hif_snoc_clear_stats(struct hif_softc *hif_ctx) 89 { 90 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 91 92 if (!hif_state) { 93 hif_err("hif_ctx null"); 94 return; 95 } 96 hif_clear_ce_stats(hif_state); 97 } 98 99 /** 100 * hif_snoc_close(): hif_bus_close 101 * @scn: pointer to the hif context. 
102 * 103 * Return: n/a 104 */ 105 void hif_snoc_close(struct hif_softc *scn) 106 { 107 hif_ce_close(scn); 108 } 109 110 /** 111 * hif_snoc_open(): hif_bus_open 112 * @hif_ctx: hif context 113 * @bus_type: bus type 114 * 115 * Return: QDF_STATUS 116 */ 117 QDF_STATUS hif_snoc_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type) 118 { 119 return hif_ce_open(hif_ctx); 120 } 121 122 /** 123 * hif_snoc_get_soc_info() - populates scn with hw info 124 * @scn: pointer to the hif context. 125 * 126 * fills in the virtual and physical base address as well as 127 * soc version info. 128 * 129 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE 130 */ 131 static QDF_STATUS hif_snoc_get_soc_info(struct hif_softc *scn) 132 { 133 int ret; 134 struct pld_soc_info soc_info; 135 136 qdf_mem_zero(&soc_info, sizeof(soc_info)); 137 138 ret = pld_get_soc_info(scn->qdf_dev->dev, &soc_info); 139 if (ret < 0) { 140 hif_err("pld_get_soc_info error = %d", ret); 141 return QDF_STATUS_E_FAILURE; 142 } 143 144 scn->mem = soc_info.v_addr; 145 scn->mem_pa = soc_info.p_addr; 146 147 scn->target_info.soc_version = soc_info.soc_id; 148 scn->target_info.target_version = soc_info.soc_id; 149 scn->target_info.target_revision = 0; 150 return QDF_STATUS_SUCCESS; 151 } 152 153 /** 154 * hif_snoc_bus_configure() - configure the snoc bus 155 * @scn: pointer to the hif context. 156 * 157 * return: 0 for success. nonzero for failure. 
 */
int hif_snoc_bus_configure(struct hif_softc *scn)
{
	int ret;
	uint8_t wake_ce_id;

	ret = hif_snoc_get_soc_info(scn);
	if (ret)
		return ret;

	hif_ce_prepare_config(scn);

	ret = hif_wlan_enable(scn);
	if (ret) {
		hif_err("hif_wlan_enable error = %d", ret);
		return ret;
	}

	ret = hif_config_ce(scn);
	if (ret)
		goto wlan_disable;

	/* Determine which copy engine's IRQ is the wakeup source */
	ret = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (ret)
		goto unconfig_ce;

	scn->wake_irq = pld_get_irq(scn->qdf_dev->dev, wake_ce_id);
	scn->wake_irq_type = HIF_PM_CE_WAKE;

	hif_info("expecting wake from ce %d, irq %d",
		 wake_ce_id, scn->wake_irq);

	return 0;

	/* Unwind in reverse order of setup on failure */
unconfig_ce:
	hif_unconfig_ce(scn);

wlan_disable:
	hif_wlan_disable(scn);

	return ret;
}

/**
 * hif_snoc_get_target_type(): Get the target type
 * @ol_sc: hif_softc struct pointer
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @hif_type: HIF type such as HIF_TYPE_QCA6180
 * @target_type: target type such as TARGET_TYPE_QCA6180
 *
 * This function is used to query the target type.
 *
 * Return: 0 for success
 */
static inline int hif_snoc_get_target_type(struct hif_softc *ol_sc,
		struct device *dev, void *bdev, const struct hif_bus_id *bid,
		uint32_t *hif_type, uint32_t *target_type)
{
	/* TODO: need to use HW version. Hard code for now */
#ifdef QCA_WIFI_3_0_ADRASTEA
	*hif_type = HIF_TYPE_ADRASTEA;
	*target_type = TARGET_TYPE_ADRASTEA;
#else
	*hif_type = 0;
	*target_type = 0;
#endif
	return 0;
}

#ifdef IPA_OFFLOAD
/**
 * hif_set_dma_coherent_mask() - set the DMA coherent mask for the device
 * @osdev: qdf device handle
 *
 * When IPA hardware older than v3.0 is present, use the reduced
 * coherent mask; otherwise use the default mask.
 *
 * Return: 0 on success, OS error code otherwise
 */
static int hif_set_dma_coherent_mask(qdf_device_t osdev)
{
	uint8_t addr_bits;

	if (false == hif_get_ipa_present())
		return qdf_set_dma_coherent_mask(osdev->dev,
						 DMA_COHERENT_MASK_DEFAULT);

	if (hif_get_ipa_hw_type() < IPA_HW_v3_0)
		addr_bits = DMA_COHERENT_MASK_BELOW_IPA_VER_3;
	else
		addr_bits = DMA_COHERENT_MASK_DEFAULT;

	return qdf_set_dma_coherent_mask(osdev->dev, addr_bits);
}
#else
/* Non-IPA build: always use the default coherent DMA mask */
static int hif_set_dma_coherent_mask(qdf_device_t osdev)
{
	return qdf_set_dma_coherent_mask(osdev->dev,
					 DMA_COHERENT_MASK_DEFAULT);
}
#endif

/**
 * hif_snoc_enable_bus(): hif_enable_bus
 * @ol_sc: HIF context
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus id
 * @type: bus type
 *
 * Sets the DMA mask, enables device wakeup, resolves the hif/target
 * type and attaches the register tables.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc,
			       struct device *dev, void *bdev,
			       const struct hif_bus_id *bid,
			       enum hif_enable_type type)
{
	int ret;
	int hif_type;
	int target_type;

	if (!ol_sc) {
		hif_err("hif_ctx is NULL");
		return QDF_STATUS_E_NOMEM;
	}

	ret = hif_set_dma_coherent_mask(ol_sc->qdf_dev);
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return qdf_status_from_os_return(ret);
	}

	/* -EEXIST means a prior enable already armed wakeup; not fatal */
	ret = qdf_device_init_wakeup(ol_sc->qdf_dev, true);
	if (ret == -EEXIST)
		hif_warn("device_init_wakeup already done");
	else if (ret) {
		hif_err("device_init_wakeup: err= %d", ret);
		return qdf_status_from_os_return(ret);
	}

	ret = hif_snoc_get_target_type(ol_sc, dev, bdev, bid,
			&hif_type, &target_type);
	if (ret < 0) {
		/* NOTE(review): device wakeup stays initialized on this
		 * error path — confirm whether qdf_device_init_wakeup(..,
		 * false) should be called before returning.
		 */
		hif_err("Invalid device id/revision_id");
		return QDF_STATUS_E_FAILURE;
	}

	ol_sc->target_info.target_type = target_type;

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	/* the bus should remain on during suspend for snoc */
	hif_vote_link_up(GET_HIF_OPAQUE_HDL(ol_sc));

	hif_debug("X - hif_type = 0x%x, target_type = 0x%x",
		  hif_type, target_type);

	return QDF_STATUS_SUCCESS;
}

/**
 * hif_snoc_disable_bus(): hif_disable_bus
 * @scn: HIF context
 *
 * This function disables the bus: drops the link vote taken at enable
 * time and tears down device wakeup.
 *
 * Return: none
 */
void hif_snoc_disable_bus(struct hif_softc *scn)
{
	int ret;

	hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

	ret = qdf_device_init_wakeup(scn->qdf_dev, false);
	if (ret)
		hif_err("device_init_wakeup: err %d", ret);
}

/**
 * hif_snoc_nointrs(): disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s) by unregistering all CE IRQs.
 *
 * Return: none
 */
void hif_snoc_nointrs(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
}

/**
 * hif_snoc_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Return: N/A
 */
void hif_snoc_irq_enable(struct hif_softc *scn,
		int ce_id)
{
	ce_enable_irq_in_individual_register(scn, ce_id);
}

/**
 * hif_snoc_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Return: N/A
 */
void hif_snoc_irq_disable(struct hif_softc *scn, int ce_id)
{
	ce_disable_irq_in_individual_register(scn, ce_id);
}

/**
 * hif_snoc_setup_wakeup_sources() - enable/disable irq wake on correct irqs
 * @scn: hif context
 * @enable: true to enable
 *
 * Firmware will send a wakeup request to the HTC_CTRL_RSVD_SVC when waking up
 * the host driver.
Ensure that the copy complete interrupt from this copy 380 * engine can wake up the apps processor. 381 * 382 * Return: 0 for success 383 */ 384 static 385 QDF_STATUS hif_snoc_setup_wakeup_sources(struct hif_softc *scn, bool enable) 386 { 387 int ret; 388 389 if (enable) 390 ret = enable_irq_wake(scn->wake_irq); 391 else 392 ret = disable_irq_wake(scn->wake_irq); 393 394 if (ret) { 395 hif_err("Fail to setup wake IRQ!"); 396 return QDF_STATUS_E_RESOURCES; 397 } 398 399 return QDF_STATUS_SUCCESS; 400 } 401 402 /** 403 * hif_snoc_bus_suspend() - prepare to suspend the bus 404 * @scn: hif context 405 * 406 * Setup wakeup interrupt configuration. 407 * Disable CE interrupts (wakeup interrupt will still wake apps) 408 * Drain tasklets. - make sure that we don't suspend while processing 409 * the wakeup message. 410 * 411 * Return: 0 on success. 412 */ 413 int hif_snoc_bus_suspend(struct hif_softc *scn) 414 { 415 if (hif_snoc_setup_wakeup_sources(scn, true) != QDF_STATUS_SUCCESS) 416 return -EFAULT; 417 return 0; 418 } 419 420 /** 421 * hif_snoc_bus_resume() - snoc bus resume function 422 * @scn: hif context 423 * 424 * Clear wakeup interrupt configuration. 425 * Re-enable ce interrupts 426 * 427 * Return: 0 on success 428 */ 429 int hif_snoc_bus_resume(struct hif_softc *scn) 430 { 431 if (hif_snoc_setup_wakeup_sources(scn, false) != QDF_STATUS_SUCCESS) 432 QDF_BUG(0); 433 434 return 0; 435 } 436 437 /** 438 * hif_snoc_bus_suspend_noirq() - ensure there are no pending transactions 439 * @scn: hif context 440 * 441 * Ensure that if we received the wakeup message before the irq 442 * was disabled that the message is processed before suspending. 443 * 444 * Return: -EBUSY if we fail to flush the tasklets. 
 */
int hif_snoc_bus_suspend_noirq(struct hif_softc *scn)
{
	if (hif_drain_tasklets(scn) != 0)
		return -EBUSY;
	return 0;
}

/**
 * hif_snoc_map_ce_to_irq() - map a copy engine id to its platform IRQ
 * @scn: hif context
 * @ce_id: copy engine id
 *
 * Return: IRQ number reported by the platform driver for @ce_id
 */
int hif_snoc_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	return pld_get_irq(scn->qdf_dev->dev, ce_id);
}

/**
 * hif_is_target_register_access_allowed(): Check target register access allow
 * @scn: HIF Context
 *
 * This function help to check whether target register access is allowed or not
 *
 * Return: true if target access is allowed else false
 */
bool hif_is_target_register_access_allowed(struct hif_softc *scn)
{
	/* During recovery, only allow access once the target is ready */
	if (hif_is_recovery_in_progress(scn))
		return hif_is_target_ready(scn);
	else
		return true;
}

/**
 * hif_snoc_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_snoc_needs_bmi(struct hif_softc *scn)
{
	return false;
}

#ifdef FEATURE_ENABLE_CE_DP_IRQ_AFFINE
/**
 * hif_snoc_ce_dp_irq_set_affinity_hint() - set affinity for DP CE IRQs
 * @scn: hif context
 *
 * Builds a cpumask of online CPUs whose physical package bit is set in
 * the bitmap returned by hif_get_perf_cluster_bitmap(), then requests
 * that each HTT-RX (datapath) copy engine IRQ be affined to that mask.
 * Per-IRQ failures are logged and skipped.
 *
 * Return: none
 */
static void hif_snoc_ce_dp_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret, irq;
	unsigned int cpus;
	struct CE_state *ce_state;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask, updated_mask;
	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
	int package_id;

	qdf_cpumask_clear(&ce_cpu_mask);

	/* Collect online CPUs belonging to the selected cluster(s) */
	qdf_for_each_online_cpu(cpus) {
		package_id = qdf_topology_physical_package_id(cpus);
		if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster)
			qdf_cpumask_set_cpu(cpus, &ce_cpu_mask);
	}

	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE DP IRQ affinity");
		return;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		/* Only datapath (HTT RX) copy engines are affined */
		if (!ce_state || !ce_state->htt_rx_data)
			continue;

		qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
		irq = pld_get_irq(scn->qdf_dev->dev, ce_id);
		ret = hif_affinity_mgr_set_ce_irq_affinity(scn, irq, ce_id,
							   &updated_mask);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(&updated_mask), irq);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(&updated_mask), irq);
	}
}

/**
 * hif_snoc_configure_irq_affinity() - apply DP IRQ affinity when enabled
 * @scn: hif context
 *
 * Return: none
 */
void hif_snoc_configure_irq_affinity(struct hif_softc *scn)
{
	if (scn->hif_config.enable_ce_dp_irq_affine)
		hif_snoc_ce_dp_irq_set_affinity_hint(scn);
}
#endif