1 /* 2 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. 3 * 4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 5 * 6 * 7 * Permission to use, copy, modify, and/or distribute this software for 8 * any purpose with or without fee is hereby granted, provided that the 9 * above copyright notice and this permission notice appear in all 10 * copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 19 * PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22 /* 23 * This file was originally distributed by Qualcomm Atheros, Inc. 24 * under proprietary terms before Copyright ownership was assigned 25 * to the Linux Foundation. 26 */ 27 28 #include "targcfg.h" 29 #include "qdf_lock.h" 30 #include "qdf_status.h" 31 #include "qdf_status.h" 32 #include <qdf_atomic.h> /* qdf_atomic_read */ 33 #include <targaddrs.h> 34 #include "hif_io32.h" 35 #include <hif.h> 36 #include "regtable.h" 37 #include <a_debug.h> 38 #include "hif_main.h" 39 #include "ce_api.h" 40 #include "qdf_trace.h" 41 #include "hif_debug.h" 42 43 void 44 hif_ce_dump_target_memory(struct hif_softc *scn, void *ramdump_base, 45 uint32_t address, uint32_t size) 46 { 47 uint32_t loc = address; 48 uint32_t val = 0; 49 uint32_t j = 0; 50 u8 *temp = ramdump_base; 51 52 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 53 return; 54 55 while (j < size) { 56 val = hif_read32_mb(scn->mem + loc + j); 57 qdf_mem_copy(temp, &val, 4); 58 j += 4; 59 temp += 4; 60 } 61 62 Q_TARGET_ACCESS_END(scn); 63 } 64 /* 65 * TBDXXX: Should be a function call specific to each Target-type. 
 * This convoluted macro converts from Target CPU Virtual Address
 * Space to CE Address Space. As part of this process, we
 * conservatively fetch the current PCIE_BAR. MOST of the time,
 * this should match the upper bits of PCI space for this device;
 * but that's not guaranteed.
 */
#ifdef QCA_WIFI_3_0
/* On QCA_WIFI_3_0 targets the CE address is simply the device physical
 * base plus the target address — no BAR register read is needed.
 * NOTE: this variant ignores its pci_addr argument and expands to use a
 * variable named 'scn' from the calling scope.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
	(scn->mem_pa + addr)
#else
/* Legacy targets: read the current PCIe BAR bits from CORE_CTRL, shift
 * them into the upper address bits, and OR in the low 20 bits of the
 * target address (plus the 0x100000 region select bit).
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(pci_addr, addr) \
	(((hif_read32_mb((pci_addr) + \
	(SOC_CORE_BASE_ADDRESS|CORE_CTRL_ADDRESS)) & 0x7ff) << 21) \
	| 0x100000 | ((addr) & 0xfffff))
#endif

/* IPQ4019 SRAM: combine the BAR register value with the low 20 address
 * bits; no 0x100000 region bit, unlike the AR900B variant below.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(pci_addr, addr) \
	(hif_read32_mb((pci_addr)+(WIFICMN_PCIE_BAR_REG_ADDRESS)) \
	| ((addr) & 0xfffff))

/* AR900B-class targets: BAR register value, region bit 0x100000, and the
 * low 20 address bits.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE_AR900B(pci_addr, addr) \
	(hif_read32_mb((pci_addr)+(WIFICMN_PCIE_BAR_REG_ADDRESS)) \
	| 0x100000 | ((addr) & 0xfffff))

/* SRAM window and per-radio BAR bases used by get_ce_phy_addr() to detect
 * IPQ4019 SRAM-region accesses.
 */
#define SRAM_BASE_ADDRESS 0xc0000
#define SRAM_END_ADDRESS 0x100000
#define WIFI0_IPQ4019_BAR 0xa000000
#define WIFI1_IPQ4019_BAR 0xa800000

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

/**
 * get_ce_phy_addr() - get the physical address of an soc virtual address
 * @sc: hif context
 * @address: soc virtual address
 * @target_type: target type being used.
 *
 * Return: soc physical address
 */
static qdf_dma_addr_t get_ce_phy_addr(struct hif_softc *sc, uint32_t address,
				      unsigned int target_type)
{
	qdf_dma_addr_t ce_phy_addr;
	/* Do not remove or rename: under QCA_WIFI_3_0 the
	 * TARG_CPU_SPACE_TO_CE_SPACE() macro expands to 'scn->mem_pa'
	 * and relies on this local being in scope.
	 */
	struct hif_softc *scn = sc;
	unsigned int region = address & 0xfffff;	/* low 20 bits: offset */
	unsigned int bar = address & 0xfff00000;	/* upper bits: BAR select */
	unsigned int sramregion = 0;

	/* IPQ4019 only: the access targets SRAM when the offset falls in
	 * the SRAM window and the upper bits match one of the two radio
	 * BARs (or are zero).
	 */
	if ((target_type == TARGET_TYPE_IPQ4019) &&
	    (region >= SRAM_BASE_ADDRESS && region <= SRAM_END_ADDRESS)
	    && (bar == WIFI0_IPQ4019_BAR ||
		bar == WIFI1_IPQ4019_BAR || bar == 0)) {
		sramregion = 1;
	}

	/* Pick the address translation appropriate to the target family. */
	if ((target_type == TARGET_TYPE_IPQ4019) && sramregion == 1) {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE_IPQ4019(sc->mem, address);
	} else if ((target_type == TARGET_TYPE_AR900B) ||
		   (target_type == TARGET_TYPE_QCA9984) ||
		   (target_type == TARGET_TYPE_IPQ4019) ||
		   (target_type == TARGET_TYPE_QCA9888)) {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE_AR900B(sc->mem, address);
	} else {
		ce_phy_addr =
			TARG_CPU_SPACE_TO_CE_SPACE(sc->mem, address);
	}

	return ce_phy_addr;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */

#define FW_SRAM_ADDRESS 0x000C0000

/**
 * hif_diag_read_mem() - read a block of soc memory over the diagnostic CE
 * @hif_ctx: hif opaque context
 * @address: soc virtual address to read from
 * @data: caller buffer that receives the bytes read
 * @nbytes: number of bytes to read
 *
 * Addresses below the register/memory boundary are serviced word-by-word
 * through hif_diag_read_access(); memory-space addresses are DMA'ed from
 * the target into a bounce buffer via the diagnostic copy engine.
 *
 * Return: QDF_STATUS_SUCCESS on success, a QDF error code otherwise
 */
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
			     uint32_t address, uint8_t *data, int nbytes)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct CE_handle *ce_diag;
	qdf_dma_addr_t CE_data;	/* Host buffer address in CE space */
	qdf_dma_addr_t CE_data_base = 0;
	void *data_buf = NULL;
	int i;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	qdf_dma_addr_t ce_phy_addr = address;
	unsigned int toeplitz_hash_result;
	unsigned int user_flags = 0;
	unsigned int target_type = 0;
	unsigned int boundary_addr = 0;

	ce_diag = hif_state->ce_diag;
	if (ce_diag == NULL) {
		HIF_ERROR("%s: DIAG CE not present", __func__);
		return QDF_STATUS_E_INVAL;
	}

	transaction_id = (mux_id & MUX_ID_MASK) |
		 (transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif
	target_type = (hif_get_target_info_handle(hif_ctx))->target_type;

	/* This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B)  ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888))
		boundary_addr = FW_SRAM_ADDRESS;
	else
		boundary_addr = DRAM_BASE_ADDRESS;

	if (address < boundary_addr) {
		/* Register space: both address and buffer must be
		 * 4-byte aligned for word reads.
		 */
		if ((address & 0x3) || ((uintptr_t) data & 0x3))
			return QDF_STATUS_E_INVAL;

		/* Read one 32-bit register per iteration until fewer than
		 * 4 bytes remain or a read fails.
		 */
		while ((nbytes >= 4) &&
		       (QDF_STATUS_SUCCESS == (status =
				hif_diag_read_access(hif_ctx, address,
				       (uint32_t *)data)))) {

			nbytes -= sizeof(uint32_t);
			address += sizeof(uint32_t);
			data += sizeof(uint32_t);

		}

		return status;
	}

	A_TARGET_ACCESS_LIKELY(scn);

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					    orig_nbytes, &CE_data_base);
	if (!data_buf) {
		status = QDF_STATUS_E_NOMEM;
		goto done;
	}
	qdf_mem_set(data_buf, orig_nbytes, 0);
	qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base,
					   orig_nbytes, DMA_FROM_DEVICE);

	/* Transfer in chunks of at most DIAG_TRANSFER_LIMIT bytes. */
	remaining_bytes = orig_nbytes;
	CE_data = CE_data_base;
	while (remaining_bytes) {
		nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT);
		{
			/* Post the host-side receive buffer first. */
			status = ce_recv_buf_enqueue(ce_diag, NULL, CE_data);
			if (status != QDF_STATUS_SUCCESS)
				goto done;
		}

		if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* convert soc virtual address to physical address */
		ce_phy_addr = get_ce_phy_addr(scn, address, target_type);

		if (Q_TARGET_ACCESS_END(scn) < 0) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* Request CE to send from Target(!)
		 * address to Host buffer
		 */
		status = ce_send(ce_diag, NULL, ce_phy_addr, nbytes,
				transaction_id, 0, user_flags);
		if (status != QDF_STATUS_SUCCESS)
			goto done;

		/* Poll (1 ms steps) for the send descriptor to complete,
		 * bounded by DIAG_ACCESS_CE_TIMEOUT_MS.
		 */
		i = 0;
		while (ce_completed_send_next(ce_diag, NULL, NULL, &buf,
				&completed_nbytes, &id, NULL, NULL,
				&toeplitz_hash_result) != QDF_STATUS_SUCCESS) {
			qdf_mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				status = QDF_STATUS_E_BUSY;
				goto done;
			}
		}
		/* Sanity: the completed descriptor must match what we sent. */
		if (nbytes != completed_nbytes) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}
		if (buf != ce_phy_addr) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		/* Poll for the matching receive completion on the host side. */
		i = 0;
		while (ce_completed_recv_next
				(ce_diag, NULL, NULL, &buf,
				&completed_nbytes, &id,
				&flags) != QDF_STATUS_SUCCESS) {
			qdf_mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				status = QDF_STATUS_E_BUSY;
				goto done;
			}
		}
		if (nbytes != completed_nbytes) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}
		if (buf != CE_data) {
			status = QDF_STATUS_E_FAILURE;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		CE_data += nbytes;
	}

done:
	A_TARGET_ACCESS_UNLIKELY(scn);

	/* Copy the DMA'ed data back to the caller only on full success. */
	if (status == QDF_STATUS_SUCCESS)
		qdf_mem_copy(data, data_buf, orig_nbytes);
	else
		HIF_ERROR("%s failure (0x%x)", __func__, address);

	if (data_buf)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					orig_nbytes, data_buf, CE_data_base, 0);

	return status;
}

/* Read 4-byte aligned data from Target memory or register */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (address >= DRAM_BASE_ADDRESS) {
		/* Assume range doesn't cross this boundary */
		return hif_diag_read_mem(hif_ctx, address, (uint8_t *) data,
					 sizeof(uint32_t));
	} else {
		/* Register space: direct read under a target-access window. */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		*data = A_TARGET_READ(scn, address);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;

		return QDF_STATUS_SUCCESS;
	}
}

/**
 * hif_diag_write_mem() - write data into the soc memory
 * @hif_ctx: hif context
 * @address: soc virtual address
 * @data: data to copy into the soc address
 * @nbytes: number of bytes to copy
 *
 * Return: QDF_STATUS_SUCCESS on success, a QDF error code otherwise
 */
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct CE_handle *ce_diag;
	void *data_buf = NULL;
	qdf_dma_addr_t CE_data;	/* Host buffer address in CE space */
	qdf_dma_addr_t CE_data_base = 0;
	int i;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	qdf_dma_addr_t ce_phy_addr = address;
	unsigned int toeplitz_hash_result;
	unsigned int user_flags = 0;
	unsigned int target_type = 0;

	ce_diag = hif_state->ce_diag;
	if (ce_diag == NULL) {
		HIF_ERROR("%s: DIAG CE not present", __func__);
		return QDF_STATUS_E_INVAL;
	}

	transaction_id = (mux_id & MUX_ID_MASK) |
		(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif

	A_TARGET_ACCESS_LIKELY(scn);

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target.
This guarantees 392 * 1) 4-byte alignment 393 * 2) Buffer in DMA-able space 394 */ 395 orig_nbytes = nbytes; 396 data_buf = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 397 orig_nbytes, &CE_data_base); 398 if (!data_buf) { 399 status = A_NO_MEMORY; 400 goto done; 401 } 402 403 /* Copy caller's data to allocated DMA buf */ 404 qdf_mem_copy(data_buf, data, orig_nbytes); 405 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data_base, 406 orig_nbytes, DMA_TO_DEVICE); 407 408 target_type = (hif_get_target_info_handle(hif_ctx))->target_type; 409 410 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) { 411 status = QDF_STATUS_E_FAILURE; 412 goto done; 413 } 414 415 /* convert soc virtual address to physical address */ 416 ce_phy_addr = get_ce_phy_addr(scn, address, target_type); 417 418 if (Q_TARGET_ACCESS_END(scn) < 0) { 419 status = QDF_STATUS_E_FAILURE; 420 goto done; 421 } 422 423 remaining_bytes = orig_nbytes; 424 CE_data = CE_data_base; 425 while (remaining_bytes) { 426 nbytes = min(remaining_bytes, DIAG_TRANSFER_LIMIT); 427 428 /* Set up to receive directly into Target(!) address */ 429 status = ce_recv_buf_enqueue(ce_diag, NULL, ce_phy_addr); 430 if (status != QDF_STATUS_SUCCESS) 431 goto done; 432 433 /* 434 * Request CE to send caller-supplied data that 435 * was copied to bounce buffer to Target(!) address. 
436 */ 437 status = ce_send(ce_diag, NULL, (qdf_dma_addr_t) CE_data, 438 nbytes, transaction_id, 0, user_flags); 439 440 if (status != QDF_STATUS_SUCCESS) 441 goto done; 442 443 /* poll for transfer complete */ 444 i = 0; 445 while (ce_completed_send_next(ce_diag, NULL, NULL, &buf, 446 &completed_nbytes, &id, 447 NULL, NULL, &toeplitz_hash_result) != 448 QDF_STATUS_SUCCESS) { 449 qdf_mdelay(1); 450 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 451 status = QDF_STATUS_E_BUSY; 452 goto done; 453 } 454 } 455 456 if (nbytes != completed_nbytes) { 457 status = QDF_STATUS_E_FAILURE; 458 goto done; 459 } 460 461 if (buf != CE_data) { 462 status = QDF_STATUS_E_FAILURE; 463 goto done; 464 } 465 466 i = 0; 467 while (ce_completed_recv_next 468 (ce_diag, NULL, NULL, &buf, 469 &completed_nbytes, &id, 470 &flags) != QDF_STATUS_SUCCESS) { 471 qdf_mdelay(1); 472 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 473 status = QDF_STATUS_E_BUSY; 474 goto done; 475 } 476 } 477 478 if (nbytes != completed_nbytes) { 479 status = QDF_STATUS_E_FAILURE; 480 goto done; 481 } 482 483 if (buf != ce_phy_addr) { 484 status = QDF_STATUS_E_FAILURE; 485 goto done; 486 } 487 488 remaining_bytes -= nbytes; 489 address += nbytes; 490 CE_data += nbytes; 491 } 492 493 done: 494 A_TARGET_ACCESS_UNLIKELY(scn); 495 496 if (data_buf) { 497 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 498 orig_nbytes, data_buf, CE_data_base, 0); 499 } 500 501 if (status != QDF_STATUS_SUCCESS) { 502 HIF_ERROR("%s failure (0x%llx)", __func__, 503 (uint64_t)ce_phy_addr); 504 } 505 506 return status; 507 } 508 509 /* Write 4B data to Target memory or register */ 510 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx, 511 uint32_t address, uint32_t data) 512 { 513 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 514 515 if (address >= DRAM_BASE_ADDRESS) { 516 /* Assume range doesn't cross this boundary */ 517 uint32_t data_buf = data; 518 519 return hif_diag_write_mem(hif_ctx, address, 520 (uint8_t *) &data_buf, 521 
sizeof(uint32_t)); 522 } else { 523 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 524 return QDF_STATUS_E_FAILURE; 525 A_TARGET_WRITE(scn, address, data); 526 if (Q_TARGET_ACCESS_END(scn) < 0) 527 return QDF_STATUS_E_FAILURE; 528 529 return QDF_STATUS_SUCCESS; 530 } 531 } 532