/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>		/* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10	/* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than only waiting for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif


void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
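/*
 * Worked example (illustrative only, not compiled): expected behaviour
 * of roundup_pwr2() for a few representative ring sizes. A power of 2
 * passes through unchanged because n & (n - 1) is zero exactly for
 * powers of 2.
 */
#if 0
	QDF_ASSERT(roundup_pwr2(32) == 32);	/* already a power of 2 */
	QDF_ASSERT(roundup_pwr2(5) == 8);	/* smallest candidate is 4 */
	QDF_ASSERT(roundup_pwr2(100) == 128);	/* next power of 2 above n */
#endif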
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Direction | Xfer     | Xfer
              |                      |      |           | Size     | Frequency
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t      | medium - | very frequent
   descriptor |                      |      |           | O(100B)  | and regular
   download   |                      |      |           |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h      | small -  | frequent and
   indication |                      |      |           | O(10B)   | regular
   upload     |                      |      |           |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h      | large -  | rare
   upload     |                      |      |           | O(1000B) | (frequent
   e.g. noise |                      |      |           |          | during IP1.0
   packets    |                      |      |           |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t      | large -  | very rare
   download   |                      |      |           | O(1000B) | (frequent
   e.g.       |                      |      |           |          | during IP1.0
   misdirected|                      |      |           |          | testing)
   EAPOL      |                      |      |           |          |
   packets    |                      |      |           |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h      |          | never(?)
              | DATA_VO (uplink)     |      |           |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t      |          | never(?)
              | DATA_VO (downlink)   |      |           |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h      | medium - | infrequent
              |                      |      |           | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t      | medium - | infrequent
   messages   | (downlink)           |      |           | O(100B)  |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h      |          | never(?)
              | HTC_RAW_STREAMS      |      |           |          |
              | (uplink)             |      |           |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t      |          | never(?)
              | HTC_RAW_STREAMS      |      |           |          |
              | (downlink)           |      |           |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h      | 4        | Diag Window
              |                      |      |           |          | infrequent
   ============================================================================
 */
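/*
 * A minimal sketch (illustrative only, not compiled) of how the
 * service_to_pipe maps below are interpreted: each { service, direction,
 * pipe } triple assigns one direction of an HTC service to a CE pipe, so
 * a lookup scans for a matching service id and direction. For example,
 * in target_service_to_ce_map_wlan below, WMI_CONTROL_SVC is carried on
 * CE 3 for host->target (PIPEDIR_OUT) and on CE 2 for target->host
 * (PIPEDIR_IN).
 */
#if 0
static int example_pipe_lookup(struct service_to_pipe *map, size_t map_len,
			       uint32_t service_id, uint32_t pipedir)
{
	size_t i;

	for (i = 0; i < map_len; i++)
		if (map[i].service_id == service_id &&
		    map[i].pipedir == pipedir)
			return map[i].pipenum;
	return -1;	/* service/direction not mapped */
}
#endif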
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{			/* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif
#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,		/* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_OUT,	/* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,	/* not currently used */
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,	/* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,	/* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{			/* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},				/* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				struct service_to_pipe **tgt_svc_map_to_use,
				uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		}
	}
}
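/*
 * Usage sketch (illustrative only, not compiled): callers such as
 * ce_mark_datapath() below first select the map that matches the current
 * target/mode, then scan it. Note that the size is returned in bytes, so
 * the entry count is the size divided by sizeof(struct service_to_pipe).
 */
#if 0
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;

	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);
	/* scan svc_map[0 .. map_len - 1] for the service of interest */
#endif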
/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data/htt_tx_data attribute of the state structure if
 * the CE serves one of the HTT DATA services.
 *
 * Return:
 * true if the CE serves an HTT DATA service (attribute set),
 * false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries that should be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring,
			      uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		qdf_mem_shared_mem_free(scn->qdf_dev,
					scn->ipa_ce_ring);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring,
			      uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * returns true if the target is SRNG based
 *
 * Return:
 * true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA6290:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn))
		return ce_services_srng();

	return ce_services_legacy();
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	return ce_services_legacy();
}
#endif /* QCA_WIFI_SUPPORT_SRNG */
static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}


static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem", __func__);
		/* free the ring start, not the advanced ptr */
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data crashing system
	 * when download firmware
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size + CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
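/*
 * Worked example of the alignment fix-up above, with assumed values:
 * if CE_DESC_RING_ALIGN were 8 and the unaligned DMA base were 0x1004,
 * then (0x1004 + 8 - 1) & ~(8 - 1) == 0x100b & ~0x7 == 0x1008, i.e. the
 * base is rounded up to the next aligned boundary. The extra
 * CE_DESC_RING_ALIGN bytes in the allocation above guarantee that the
 * rounded-up base still has room for all nentries descriptors.
 */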
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
 * checked for here as well.
 */
#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_history() - Allocate mem for the CE descriptors storing
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
		qdf_mem_malloc(HIF_CE_HISTORY_MAX *
			       sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

/**
 * free_mem_ce_debug_history() - Free mem allocated for the CE descriptors
 * storing.
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: None
 */
static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

#if HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}
#endif
	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
}

static inline void reset_ce_debug_history(struct hif_softc *scn)
{
}
#endif /* Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup. */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						       CE_id,
						       CE_state->status_ring,
						       attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
			    scn->polled_mode_on) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_SW);
				CE_state->timer_inited = true;
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - flag that fastpath mode is enabled
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_DBG("%s, Enabling polled mode", __func__);
	scn->polled_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->polled_mode_on;
}

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: opaque CE handle
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring,
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->sw_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * When fastpath_mode is on and this is a datapath CE: unlike other
	 * CEs, this CE is completely full; it does not leave one blank space
	 * to distinguish between an empty queue and a full queue. So free
	 * all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning,
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	CE_state->timer_inited = false;
	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}
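/*
 * Sketch of the fragment handling used by hif_send_head() below
 * (illustrative only, not compiled; the frag0_/frag1_ names are
 * hypothetical). A typical HTT download carries two fragments, the tx
 * descriptor and the frame header, so a sendlist is built with one
 * entry per fragment before a single ce_sendlist_send() call:
 */
#if 0
	struct ce_sendlist sendlist;

	ce_sendlist_init(&sendlist);
	ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, data_attr);
	ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0,
			    data_attr & ~QDF_CE_TX_PKT_OFFSET_BIT_M);
	ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
#endif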
/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes > bytes ? bytes :
					     frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream(
						nbuf, nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries
				 >> 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {

				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info)
{
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context,
					&transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
(attr.dest_nentries) { 1971 /* pipe used to receive from target */ 1972 ce_recv_cb_register(pipe_info->ce_hdl, 1973 hif_pci_ce_recv_data, pipe_info, 1974 attr.flags & CE_ATTR_DISABLE_INTR); 1975 } 1976 1977 if (attr.src_nentries) 1978 qdf_spinlock_create(&pipe_info->completion_freeq_lock); 1979 1980 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 1981 sizeof(pipe_info->pipe_callbacks)); 1982 } 1983 1984 A_TARGET_ACCESS_UNLIKELY(scn); 1985 return 0; 1986 } 1987 1988 /* 1989 * Install pending msg callbacks. 1990 * 1991 * TBDXXX: This hack is needed because upper layers install msg callbacks 1992 * for use with HTC before BMI is done; yet this HIF implementation 1993 * needs to continue to use BMI msg callbacks. Really, upper layers 1994 * should not register HTC callbacks until AFTER BMI phase. 1995 */ 1996 static void hif_msg_callbacks_install(struct hif_softc *scn) 1997 { 1998 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1999 2000 qdf_mem_copy(&hif_state->msg_callbacks_current, 2001 &hif_state->msg_callbacks_pending, 2002 sizeof(hif_state->msg_callbacks_pending)); 2003 } 2004 2005 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 2006 uint8_t *DLPipe) 2007 { 2008 int ul_is_polled, dl_is_polled; 2009 2010 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 2011 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 2012 } 2013 2014 /** 2015 * hif_dump_pipe_debug_count() - Log error count 2016 * @scn: hif_softc pointer. 2017 * 2018 * Output the pipe error counts of each pipe to log file 2019 * 2020 * Return: N/A 2021 */ 2022 void hif_dump_pipe_debug_count(struct hif_softc *scn) 2023 { 2024 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2025 int pipe_num; 2026 2027 if (hif_state == NULL) { 2028 HIF_ERROR("%s hif_state is NULL", __func__); 2029 return; 2030 } 2031 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2032 struct HIF_CE_pipe_info *pipe_info; 2033 2034 pipe_info = &hif_state->pipe_info[pipe_num]; 2035 2036 if (pipe_info->nbuf_alloc_err_count > 0 || 2037 pipe_info->nbuf_dma_err_count > 0 || 2038 pipe_info->nbuf_ce_enqueue_err_count) 2039 HIF_ERROR( 2040 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 2041 __func__, pipe_info->pipe_num, 2042 atomic_read(&pipe_info->recv_bufs_needed), 2043 pipe_info->nbuf_alloc_err_count, 2044 pipe_info->nbuf_dma_err_count, 2045 pipe_info->nbuf_ce_enqueue_err_count); 2046 } 2047 } 2048 2049 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 2050 void *nbuf, uint32_t *error_cnt, 2051 enum hif_ce_event_type failure_type, 2052 const char *failure_type_string) 2053 { 2054 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 2055 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 2056 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2057 int ce_id = CE_state->id; 2058 uint32_t error_cnt_tmp; 2059 2060 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2061 error_cnt_tmp = ++(*error_cnt); 2062 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2063 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", 2064 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 2065 failure_type_string); 2066 hif_record_ce_desc_event(scn, ce_id, failure_type, 2067 NULL, nbuf, bufs_needed_tmp, 0); 2068 /* if we fail to allocate the last buffer for an rx pipe, 2069 * there is no trigger to refill the ce and we will 2070 * eventually 
crash 2071 */ 2072 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) 2073 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 2074 2075 } 2076 2077 2078 2079 2080 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 2081 { 2082 struct CE_handle *ce_hdl; 2083 qdf_size_t buf_sz; 2084 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2085 QDF_STATUS status; 2086 uint32_t bufs_posted = 0; 2087 2088 buf_sz = pipe_info->buf_sz; 2089 if (buf_sz == 0) { 2090 /* Unused Copy Engine */ 2091 return QDF_STATUS_SUCCESS; 2092 } 2093 2094 ce_hdl = pipe_info->ce_hdl; 2095 2096 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2097 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 2098 qdf_dma_addr_t CE_data; /* CE space buffer address */ 2099 qdf_nbuf_t nbuf; 2100 2101 atomic_dec(&pipe_info->recv_bufs_needed); 2102 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2103 2104 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 2105 if (!nbuf) { 2106 hif_post_recv_buffers_failure(pipe_info, nbuf, 2107 &pipe_info->nbuf_alloc_err_count, 2108 HIF_RX_NBUF_ALLOC_FAILURE, 2109 "HIF_RX_NBUF_ALLOC_FAILURE"); 2110 return QDF_STATUS_E_NOMEM; 2111 } 2112 2113 /* 2114 * qdf_nbuf_peek_header(nbuf, &data, &unused); 2115 * CE_data = dma_map_single(dev, data, buf_sz, ); 2116 * DMA_FROM_DEVICE); 2117 */ 2118 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 2119 QDF_DMA_FROM_DEVICE); 2120 2121 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2122 hif_post_recv_buffers_failure(pipe_info, nbuf, 2123 &pipe_info->nbuf_dma_err_count, 2124 HIF_RX_NBUF_MAP_FAILURE, 2125 "HIF_RX_NBUF_MAP_FAILURE"); 2126 qdf_nbuf_free(nbuf); 2127 return status; 2128 } 2129 2130 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 2131 2132 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 2133 buf_sz, DMA_FROM_DEVICE); 2134 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 2135 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2136 hif_post_recv_buffers_failure(pipe_info, nbuf, 2137 &pipe_info->nbuf_ce_enqueue_err_count, 2138 HIF_RX_NBUF_ENQUEUE_FAILURE, 2139 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 2140 2141 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 2142 QDF_DMA_FROM_DEVICE); 2143 qdf_nbuf_free(nbuf); 2144 return status; 2145 } 2146 2147 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2148 bufs_posted++; 2149 } 2150 pipe_info->nbuf_alloc_err_count = 2151 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 2152 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 2153 pipe_info->nbuf_dma_err_count = 2154 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 2155 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 2156 pipe_info->nbuf_ce_enqueue_err_count = 2157 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 2158 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 2159 2160 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2161 2162 return QDF_STATUS_SUCCESS; 2163 } 2164 2165 /* 2166 * Try to post all desired receive buffers for all pipes. 2167 * Returns 0 for non fastpath rx copy engine as 2168 * oom_allocation_work will be scheduled to recover any 2169 * failures, non-zero if unable to completely replenish 2170 * receive buffers for fastpath rx Copy engine. 
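 *
 * A typical caller treats a failure here as fatal for startup; see
 * hif_start() below:
 *
 *	qdf_status = hif_post_recv_buffers(scn);
 *	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
 *		return qdf_status;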
2171 */ 2172 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 2173 { 2174 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2175 int pipe_num; 2176 struct CE_state *ce_state = NULL; 2177 QDF_STATUS qdf_status; 2178 2179 A_TARGET_ACCESS_LIKELY(scn); 2180 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2181 struct HIF_CE_pipe_info *pipe_info; 2182 2183 ce_state = scn->ce_id_to_state[pipe_num]; 2184 pipe_info = &hif_state->pipe_info[pipe_num]; 2185 2186 if (hif_is_nss_wifi_enabled(scn) && 2187 ce_state && (ce_state->htt_rx_data)) 2188 continue; 2189 2190 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 2191 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 2192 ce_state->htt_rx_data && 2193 scn->fastpath_mode_on) { 2194 A_TARGET_ACCESS_UNLIKELY(scn); 2195 return qdf_status; 2196 } 2197 } 2198 2199 A_TARGET_ACCESS_UNLIKELY(scn); 2200 2201 return QDF_STATUS_SUCCESS; 2202 } 2203 2204 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 2205 { 2206 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2207 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2208 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 2209 2210 hif_update_fastpath_recv_bufs_cnt(scn); 2211 2212 hif_msg_callbacks_install(scn); 2213 2214 if (hif_completion_thread_startup(hif_state)) 2215 return QDF_STATUS_E_FAILURE; 2216 2217 /* enable buffer cleanup */ 2218 hif_state->started = true; 2219 2220 /* Post buffers once to start things off. */ 2221 qdf_status = hif_post_recv_buffers(scn); 2222 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2223 /* cleanup is done in hif_ce_disable */ 2224 HIF_ERROR("%s:failed to post buffers", __func__); 2225 return qdf_status; 2226 } 2227 2228 return qdf_status; 2229 } 2230 2231 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2232 { 2233 struct hif_softc *scn; 2234 struct CE_handle *ce_hdl; 2235 uint32_t buf_sz; 2236 struct HIF_CE_state *hif_state; 2237 qdf_nbuf_t netbuf; 2238 qdf_dma_addr_t CE_data; 2239 void *per_CE_context; 2240 2241 buf_sz = pipe_info->buf_sz; 2242 /* Unused Copy Engine */ 2243 if (buf_sz == 0) 2244 return; 2245 2246 2247 hif_state = pipe_info->HIF_CE_state; 2248 if (!hif_state->started) 2249 return; 2250 2251 scn = HIF_GET_SOFTC(hif_state); 2252 ce_hdl = pipe_info->ce_hdl; 2253 2254 if (scn->qdf_dev == NULL) 2255 return; 2256 while (ce_revoke_recv_next 2257 (ce_hdl, &per_CE_context, (void **)&netbuf, 2258 &CE_data) == QDF_STATUS_SUCCESS) { 2259 if (netbuf) { 2260 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 2261 QDF_DMA_FROM_DEVICE); 2262 qdf_nbuf_free(netbuf); 2263 } 2264 } 2265 } 2266 2267 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2268 { 2269 struct CE_handle *ce_hdl; 2270 struct HIF_CE_state *hif_state; 2271 struct hif_softc *scn; 2272 qdf_nbuf_t netbuf; 2273 void *per_CE_context; 2274 qdf_dma_addr_t CE_data; 2275 unsigned int nbytes; 2276 unsigned int id; 2277 uint32_t buf_sz; 2278 uint32_t toeplitz_hash_result; 2279 2280 buf_sz = pipe_info->buf_sz; 2281 if (buf_sz == 0) { 2282 /* Unused Copy Engine */ 2283 return; 2284 } 2285 2286 hif_state = pipe_info->HIF_CE_state; 2287 if (!hif_state->started) { 2288 return; 2289 } 2290 2291 scn = HIF_GET_SOFTC(hif_state); 2292 2293 ce_hdl = pipe_info->ce_hdl; 2294 2295 while (ce_cancel_send_next 2296 (ce_hdl, &per_CE_context, 2297 (void **)&netbuf, &CE_data, &nbytes, 2298 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 2299 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 2300 /* 2301 * Packets enqueued by htt_h2t_ver_req_msg() and 2302 * 
htt_h2t_rx_ring_cfg_msg_ll() have already been 2303 * freed in htt_htc_misc_pkt_pool_free() in 2304 * wlantl_close(), so do not free them here again 2305 * by checking whether it's the endpoint 2306 * which they are queued in. 2307 */ 2308 if (id == scn->htc_htt_tx_endpoint) 2309 return; 2310 /* Indicate the completion to higher 2311 * layer to free the buffer 2312 */ 2313 if (pipe_info->pipe_callbacks.txCompletionHandler) 2314 pipe_info->pipe_callbacks. 2315 txCompletionHandler(pipe_info-> 2316 pipe_callbacks.Context, 2317 netbuf, id, toeplitz_hash_result); 2318 } 2319 } 2320 } 2321 2322 /* 2323 * Cleanup residual buffers for device shutdown: 2324 * buffers that were enqueued for receive 2325 * buffers that were to be sent 2326 * Note: Buffers that had completed but which were 2327 * not yet processed are on a completion queue. They 2328 * are handled when the completion thread shuts down. 2329 */ 2330 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 2331 { 2332 int pipe_num; 2333 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2334 struct CE_state *ce_state; 2335 2336 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2337 struct HIF_CE_pipe_info *pipe_info; 2338 2339 ce_state = scn->ce_id_to_state[pipe_num]; 2340 if (hif_is_nss_wifi_enabled(scn) && ce_state && 2341 ((ce_state->htt_tx_data) || 2342 (ce_state->htt_rx_data))) { 2343 continue; 2344 } 2345 2346 pipe_info = &hif_state->pipe_info[pipe_num]; 2347 hif_recv_buffer_cleanup_on_pipe(pipe_info); 2348 hif_send_buffer_cleanup_on_pipe(pipe_info); 2349 } 2350 } 2351 2352 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 2353 { 2354 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2355 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2356 2357 hif_buffer_cleanup(hif_state); 2358 } 2359 2360 static void hif_destroy_oom_work(struct hif_softc *scn) 2361 { 2362 struct CE_state *ce_state; 2363 int ce_id; 2364 2365 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 2366 ce_state = scn->ce_id_to_state[ce_id]; 2367 if (ce_state) 2368 qdf_destroy_work(scn->qdf_dev, 2369 &ce_state->oom_allocation_work); 2370 } 2371 } 2372 2373 void hif_ce_stop(struct hif_softc *scn) 2374 { 2375 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2376 int pipe_num; 2377 2378 /* 2379 * before cleaning up any memory, ensure irq & 2380 * bottom half contexts will not be re-entered 2381 */ 2382 hif_disable_isr(&scn->osc); 2383 hif_destroy_oom_work(scn); 2384 scn->hif_init_done = false; 2385 2386 /* 2387 * At this point, asynchronous threads are stopped, 2388 * The Target should not DMA nor interrupt, Host code may 2389 * not initiate anything more. So we just need to clean 2390 * up Host-side state. 
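 * Cleanup below proceeds in order: remove the athdiag procfs entry,
 * free residual rx/tx buffers via hif_buffer_cleanup(), ce_fini() each
 * pipe (destroying its per-pipe locks), and finally stop and free the
 * sleep timer if it was initialized.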
2391 */ 2392 2393 if (scn->athdiag_procfs_inited) { 2394 athdiag_procfs_remove(); 2395 scn->athdiag_procfs_inited = false; 2396 } 2397 2398 hif_buffer_cleanup(hif_state); 2399 2400 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2401 struct HIF_CE_pipe_info *pipe_info; 2402 struct CE_attr attr; 2403 struct CE_handle *ce_diag = hif_state->ce_diag; 2404 2405 pipe_info = &hif_state->pipe_info[pipe_num]; 2406 if (pipe_info->ce_hdl) { 2407 if (pipe_info->ce_hdl != ce_diag) { 2408 attr = hif_state->host_ce_config[pipe_num]; 2409 if (attr.src_nentries) 2410 qdf_spinlock_destroy(&pipe_info-> 2411 completion_freeq_lock); 2412 } 2413 ce_fini(pipe_info->ce_hdl); 2414 pipe_info->ce_hdl = NULL; 2415 pipe_info->buf_sz = 0; 2416 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2417 } 2418 } 2419 2420 if (hif_state->sleep_timer_init) { 2421 qdf_timer_stop(&hif_state->sleep_timer); 2422 qdf_timer_free(&hif_state->sleep_timer); 2423 hif_state->sleep_timer_init = false; 2424 } 2425 2426 hif_state->started = false; 2427 } 2428 2429 2430 /** 2431 * hif_get_target_ce_config() - get copy engine configuration 2432 * @target_ce_config_ret: basic copy engine configuration 2433 * @target_ce_config_sz_ret: size of the basic configuration in bytes 2434 * @target_service_to_ce_map_ret: service mapping for the copy engines 2435 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 2436 * @target_shadow_reg_cfg_ret: shadow register configuration 2437 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 2438 * 2439 * providing accessor to these values outside of this file. 2440 * currently these are stored in static pointers to const sections. 2441 * there are multiple configurations that are selected from at compile time. 2442 * Runtime selection would need to consider mode, target type and bus type. 2443 * 2444 * Return: return by parameter. 
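 *
 * For example, hif_wlan_enable() below uses this accessor to fill a
 * struct pld_wlan_enable_cfg, dividing each returned size in bytes by
 * the element size to obtain the array lengths the platform driver
 * expects.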
2445 */ 2446 void hif_get_target_ce_config(struct hif_softc *scn, 2447 struct CE_pipe_config **target_ce_config_ret, 2448 uint32_t *target_ce_config_sz_ret, 2449 struct service_to_pipe **target_service_to_ce_map_ret, 2450 uint32_t *target_service_to_ce_map_sz_ret, 2451 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 2452 uint32_t *shadow_cfg_sz_ret) 2453 { 2454 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2455 2456 *target_ce_config_ret = hif_state->target_ce_config; 2457 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 2458 2459 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 2460 target_service_to_ce_map_sz_ret); 2461 2462 if (target_shadow_reg_cfg_ret) 2463 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 2464 2465 if (shadow_cfg_sz_ret) 2466 *shadow_cfg_sz_ret = shadow_cfg_sz; 2467 } 2468 2469 #ifdef CONFIG_SHADOW_V2 2470 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2471 { 2472 int i; 2473 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2474 "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg); 2475 2476 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 2477 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2478 "%s: i %d, val %x\n", __func__, i, 2479 cfg->shadow_reg_v2_cfg[i].addr); 2480 } 2481 } 2482 2483 #else 2484 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2485 { 2486 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2487 "%s: CONFIG_SHADOW_V2 not defined\n", __func__); 2488 } 2489 #endif 2490 2491 /** 2492 * hif_wlan_enable(): call the platform driver to enable wlan 2493 * @scn: HIF Context 2494 * 2495 * This function passes the con_mode and CE configuration to 2496 * platform driver to enable wlan. 2497 * 2498 * Return: linux error code 2499 */ 2500 int hif_wlan_enable(struct hif_softc *scn) 2501 { 2502 struct pld_wlan_enable_cfg cfg; 2503 enum pld_driver_mode mode; 2504 uint32_t con_mode = hif_get_conparam(scn); 2505 2506 hif_get_target_ce_config(scn, 2507 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 2508 &cfg.num_ce_tgt_cfg, 2509 (struct service_to_pipe **)&cfg.ce_svc_cfg, 2510 &cfg.num_ce_svc_pipe_cfg, 2511 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 2512 &cfg.num_shadow_reg_cfg); 2513 2514 /* translate from structure size to array size */ 2515 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 2516 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 2517 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 2518 2519 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, 2520 &cfg.num_shadow_reg_v2_cfg); 2521 2522 hif_print_hal_shadow_register_cfg(&cfg); 2523 2524 if (QDF_GLOBAL_FTM_MODE == con_mode) 2525 mode = PLD_FTM; 2526 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 2527 mode = PLD_COLDBOOT_CALIBRATION; 2528 else if (QDF_IS_EPPING_ENABLED(con_mode)) 2529 mode = PLD_EPPING; 2530 else 2531 mode = PLD_MISSION; 2532 2533 if (BYPASS_QMI) 2534 return 0; 2535 else 2536 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, 2537 mode, QWLAN_VERSIONSTR); 2538 } 2539 2540 #ifdef WLAN_FEATURE_EPPING 2541 2542 #define CE_EPPING_USES_IRQ true 2543 2544 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) 2545 { 2546 if (CE_EPPING_USES_IRQ) 2547 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 2548 else 2549 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 2550 hif_state->target_ce_config = target_ce_config_wlan_epping; 2551 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 
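	/* epping also swaps in its own shadow register map; these globals
	 * are the ones handed back by hif_get_target_ce_config() above.
	 */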
2552 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 2553 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); 2554 } 2555 #endif 2556 2557 /** 2558 * hif_ce_prepare_config() - load the correct static tables. 2559 * @scn: hif context 2560 * 2561 * Epping uses different static attribute tables than mission mode. 2562 */ 2563 void hif_ce_prepare_config(struct hif_softc *scn) 2564 { 2565 uint32_t mode = hif_get_conparam(scn); 2566 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2567 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 2568 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2569 2570 hif_state->ce_services = ce_services_attach(scn); 2571 2572 scn->ce_count = HOST_CE_COUNT; 2573 /* if epping is enabled we need to use the epping configuration. */ 2574 if (QDF_IS_EPPING_ENABLED(mode)) { 2575 hif_ce_prepare_epping_config(hif_state); 2576 } 2577 2578 switch (tgt_info->target_type) { 2579 default: 2580 hif_state->host_ce_config = host_ce_config_wlan; 2581 hif_state->target_ce_config = target_ce_config_wlan; 2582 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 2583 break; 2584 case TARGET_TYPE_AR900B: 2585 case TARGET_TYPE_QCA9984: 2586 case TARGET_TYPE_IPQ4019: 2587 case TARGET_TYPE_QCA9888: 2588 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 2589 hif_state->host_ce_config = 2590 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 2591 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2592 hif_state->host_ce_config = 2593 host_lowdesc_ce_cfg_wlan_ar900b; 2594 } else { 2595 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 2596 } 2597 2598 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 2599 hif_state->target_ce_config_sz = 2600 sizeof(target_ce_config_wlan_ar900b); 2601 2602 break; 2603 2604 case TARGET_TYPE_AR9888: 2605 case TARGET_TYPE_AR9888V2: 2606 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2607 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 2608 } else { 2609 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 2610 } 2611 2612 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 2613 hif_state->target_ce_config_sz = 2614 sizeof(target_ce_config_wlan_ar9888); 2615 2616 break; 2617 2618 case TARGET_TYPE_QCA8074: 2619 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 2620 hif_state->host_ce_config = 2621 host_ce_config_wlan_qca8074_pci; 2622 hif_state->target_ce_config = 2623 target_ce_config_wlan_qca8074_pci; 2624 hif_state->target_ce_config_sz = 2625 sizeof(target_ce_config_wlan_qca8074_pci); 2626 } else { 2627 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 2628 hif_state->target_ce_config = 2629 target_ce_config_wlan_qca8074; 2630 hif_state->target_ce_config_sz = 2631 sizeof(target_ce_config_wlan_qca8074); 2632 } 2633 break; 2634 case TARGET_TYPE_QCA6290: 2635 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 2636 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 2637 hif_state->target_ce_config_sz = 2638 sizeof(target_ce_config_wlan_qca6290); 2639 2640 scn->ce_count = QCA_6290_CE_COUNT; 2641 break; 2642 } 2643 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 2644 } 2645 2646 /** 2647 * hif_ce_open() - do ce specific allocations 2648 * @hif_sc: pointer to hif context 2649 * 2650 * return: 0 for success or QDF_STATUS_E_NOMEM 2651 */ 2652 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 2653 { 2654 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2655 2656 qdf_spinlock_create(&hif_state->irq_reg_lock); 2657 
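	/* both locks are torn down again in hif_ce_close() below */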
qdf_spinlock_create(&hif_state->keep_awake_lock); 2658 return QDF_STATUS_SUCCESS; 2659 } 2660 2661 /** 2662 * hif_ce_close() - do ce specific free 2663 * @hif_sc: pointer to hif context 2664 */ 2665 void hif_ce_close(struct hif_softc *hif_sc) 2666 { 2667 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2668 2669 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 2670 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 2671 } 2672 2673 /** 2674 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 2675 * @hif_sc: hif context 2676 * 2677 * uses state variables to support cleaning up when hif_config_ce fails. 2678 */ 2679 void hif_unconfig_ce(struct hif_softc *hif_sc) 2680 { 2681 int pipe_num; 2682 struct HIF_CE_pipe_info *pipe_info; 2683 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2684 2685 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 2686 pipe_info = &hif_state->pipe_info[pipe_num]; 2687 if (pipe_info->ce_hdl) { 2688 ce_unregister_irq(hif_state, (1 << pipe_num)); 2689 ce_fini(pipe_info->ce_hdl); 2690 pipe_info->ce_hdl = NULL; 2691 pipe_info->buf_sz = 0; 2692 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2693 } 2694 } 2695 if (hif_sc->athdiag_procfs_inited) { 2696 athdiag_procfs_remove(); 2697 hif_sc->athdiag_procfs_inited = false; 2698 } 2699 } 2700 2701 #ifdef CONFIG_BYPASS_QMI 2702 #define FW_SHARED_MEM (2 * 1024 * 1024) 2703 2704 /** 2705 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 2706 * @scn: pointer to HIF structure 2707 * 2708 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 2709 * 2710 * Return: void 2711 */ 2712 static void hif_post_static_buf_to_target(struct hif_softc *scn) 2713 { 2714 void *target_va; 2715 phys_addr_t target_pa; 2716 2717 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 2718 FW_SHARED_MEM, &target_pa); 2719 if (NULL == target_va) { 2720 HIF_TRACE("Memory allocation failed could not post target buf"); 2721 return; 2722 } 2723 hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 2724 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); 2725 } 2726 #else 2727 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 2728 { 2729 } 2730 #endif 2731 2732 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 2733 bool wait_for_it) 2734 { 2735 /* todo */ 2736 return 0; 2737 } 2738 2739 /** 2740 * hif_config_ce() - configure copy engines 2741 * @scn: hif context 2742 * 2743 * Prepares fw, copy engine hardware and host sw according 2744 * to the attributes selected by hif_ce_prepare_config. 2745 * 2746 * also calls athdiag_procfs_init 2747 * 2748 * return: 0 for success nonzero for failure. 
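 * On any mid-configuration failure the partially initialized pipes are
 * torn down via hif_unconfig_ce() (see the err label), so callers only
 * need to check the return value.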
2749 */ 2750 int hif_config_ce(struct hif_softc *scn) 2751 { 2752 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2753 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2754 struct HIF_CE_pipe_info *pipe_info; 2755 int pipe_num; 2756 struct CE_state *ce_state = NULL; 2757 2758 #ifdef ADRASTEA_SHADOW_REGISTERS 2759 int i; 2760 #endif 2761 QDF_STATUS rv = QDF_STATUS_SUCCESS; 2762 2763 scn->notice_send = true; 2764 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 2765 2766 hif_post_static_buf_to_target(scn); 2767 2768 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 2769 2770 hif_config_rri_on_ddr(scn); 2771 2772 if (ce_srng_based(scn)) 2773 scn->bus_ops.hif_target_sleep_state_adjust = 2774 &hif_srng_sleep_state_adjust; 2775 2776 /* Initialise the CE debug history sysfs interface inputs ce_id and 2777 * index. Disable data storing 2778 */ 2779 reset_ce_debug_history(scn); 2780 2781 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2782 struct CE_attr *attr; 2783 2784 pipe_info = &hif_state->pipe_info[pipe_num]; 2785 pipe_info->pipe_num = pipe_num; 2786 pipe_info->HIF_CE_state = hif_state; 2787 attr = &hif_state->host_ce_config[pipe_num]; 2788 2789 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 2790 ce_state = scn->ce_id_to_state[pipe_num]; 2791 if (!ce_state) { 2792 A_TARGET_ACCESS_UNLIKELY(scn); 2793 goto err; 2794 } 2795 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 2796 QDF_ASSERT(pipe_info->ce_hdl != NULL); 2797 if (pipe_info->ce_hdl == NULL) { 2798 rv = QDF_STATUS_E_FAILURE; 2799 A_TARGET_ACCESS_UNLIKELY(scn); 2800 goto err; 2801 } 2802 2803 ce_state->lro_data = qdf_lro_init(); 2804 2805 if (attr->flags & CE_ATTR_DIAG) { 2806 /* Reserve the ultimate CE for 2807 * Diagnostic Window support 2808 */ 2809 hif_state->ce_diag = pipe_info->ce_hdl; 2810 continue; 2811 } 2812 2813 if (hif_is_nss_wifi_enabled(scn) && ce_state && 2814 (ce_state->htt_rx_data)) 2815 continue; 2816 2817 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); 2818 if (attr->dest_nentries > 0) { 2819 atomic_set(&pipe_info->recv_bufs_needed, 2820 init_buffer_count(attr->dest_nentries - 1)); 2821 /* SRNG based CE has one entry less */ 2822 if (ce_srng_based(scn)) 2823 atomic_dec(&pipe_info->recv_bufs_needed); 2824 } else { 2825 atomic_set(&pipe_info->recv_bufs_needed, 0); 2826 } 2827 ce_tasklet_init(hif_state, (1 << pipe_num)); 2828 ce_register_irq(hif_state, (1 << pipe_num)); 2829 } 2830 2831 if (athdiag_procfs_init(scn) != 0) { 2832 A_TARGET_ACCESS_UNLIKELY(scn); 2833 goto err; 2834 } 2835 scn->athdiag_procfs_inited = true; 2836 2837 HIF_DBG("%s: ce_init done", __func__); 2838 2839 init_tasklet_workers(hif_hdl); 2840 2841 HIF_DBG("%s: X, ret = %d", __func__, rv); 2842 2843 #ifdef ADRASTEA_SHADOW_REGISTERS 2844 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); 2845 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 2846 HIF_DBG("%s Shadow Register%d is mapped to address %x", 2847 __func__, i, 2848 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 2849 } 2850 #endif 2851 2852 return rv != QDF_STATUS_SUCCESS; 2853 2854 err: 2855 /* Failure, so clean up */ 2856 hif_unconfig_ce(scn); 2857 HIF_TRACE("%s: X, ret = %d", __func__, rv); 2858 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 2859 } 2860 2861 #ifdef WLAN_FEATURE_FASTPATH 2862 /** 2863 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler 2864 * @handler: Callback function 2865 * @context: handle for callback function 2866 * 2867 * Return: QDF_STATUS_SUCCESS on success or
QDF_STATUS_E_FAILURE 2868 */ 2869 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, 2870 fastpath_msg_handler handler, 2871 void *context) 2872 { 2873 struct CE_state *ce_state; 2874 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2875 int i; 2876 2877 if (!scn) { 2878 HIF_ERROR("%s: scn is NULL", __func__); 2879 QDF_ASSERT(0); 2880 return QDF_STATUS_E_FAILURE; 2881 } 2882 2883 if (!scn->fastpath_mode_on) { 2884 HIF_WARN("%s: Fastpath mode disabled", __func__); 2885 return QDF_STATUS_E_FAILURE; 2886 } 2887 2888 for (i = 0; i < scn->ce_count; i++) { 2889 ce_state = scn->ce_id_to_state[i]; 2890 if (ce_state->htt_rx_data) { 2891 ce_state->fastpath_handler = handler; 2892 ce_state->context = context; 2893 } 2894 } 2895 2896 return QDF_STATUS_SUCCESS; 2897 } 2898 qdf_export_symbol(hif_ce_fastpath_cb_register); 2899 #endif 2900 2901 #ifdef IPA_OFFLOAD 2902 /** 2903 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 2904 * @scn: bus context 2905 * @ce_sr_base_paddr: copyengine source ring base physical address 2906 * @ce_sr_ring_size: copyengine source ring size 2907 * @ce_reg_paddr: copyengine register physical address 2908 * 2909 * IPA micro controller data path offload feature enabled, 2910 * HIF should release copy engine related resource information to IPA UC 2911 * IPA UC will access hardware resource with released information 2912 * 2913 * Return: None 2914 */ 2915 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 2916 qdf_shared_mem_t **ce_sr, 2917 uint32_t *ce_sr_ring_size, 2918 qdf_dma_addr_t *ce_reg_paddr) 2919 { 2920 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2921 struct HIF_CE_pipe_info *pipe_info = 2922 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 2923 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 2924 2925 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 2926 ce_reg_paddr); 2927 } 2928 #endif /* IPA_OFFLOAD */ 2929 2930 2931 #ifdef ADRASTEA_SHADOW_REGISTERS 2932 2933 /* 2934 * Current shadow register config 2935 * 2936 * ----------------------------------------------------------- 2937 * Shadow Register | CE | src/dst write index 2938 * ----------------------------------------------------------- 2939 * 0 | 0 | src 2940 * 1 No Config - Doesn't point to anything 2941 * 2 No Config - Doesn't point to anything 2942 * 3 | 3 | src 2943 * 4 | 4 | src 2944 * 5 | 5 | src 2945 * 6 No Config - Doesn't point to anything 2946 * 7 | 7 | src 2947 * 8 No Config - Doesn't point to anything 2948 * 9 No Config - Doesn't point to anything 2949 * 10 No Config - Doesn't point to anything 2950 * 11 No Config - Doesn't point to anything 2951 * ----------------------------------------------------------- 2952 * 12 No Config - Doesn't point to anything 2953 * 13 | 1 | dst 2954 * 14 | 2 | dst 2955 * 15 No Config - Doesn't point to anything 2956 * 16 No Config - Doesn't point to anything 2957 * 17 No Config - Doesn't point to anything 2958 * 18 No Config - Doesn't point to anything 2959 * 19 | 7 | dst 2960 * 20 | 8 | dst 2961 * 21 No Config - Doesn't point to anything 2962 * 22 No Config - Doesn't point to anything 2963 * 23 No Config - Doesn't point to anything 2964 * ----------------------------------------------------------- 2965 * 2966 * 2967 * ToDo - Move shadow register config to following in the future 2968 * This helps free up a block of shadow registers towards the end. 
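 * (In the proposed layout below only shadow registers 0-8 remain
 * configured.)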
2969 * Can be used for other purposes 2970 * 2971 * ----------------------------------------------------------- 2972 * Shadow Register | CE | src/dst write index 2973 * ----------------------------------------------------------- 2974 * 0 | 0 | src 2975 * 1 | 3 | src 2976 * 2 | 4 | src 2977 * 3 | 5 | src 2978 * 4 | 7 | src 2979 * ----------------------------------------------------------- 2980 * 5 | 1 | dst 2981 * 6 | 2 | dst 2982 * 7 | 7 | dst 2983 * 8 | 8 | dst 2984 * ----------------------------------------------------------- 2985 * 9 No Config - Doesn't point to anything 2986 * 12 No Config - Doesn't point to anything 2987 * 13 No Config - Doesn't point to anything 2988 * 14 No Config - Doesn't point to anything 2989 * 15 No Config - Doesn't point to anything 2990 * 16 No Config - Doesn't point to anything 2991 * 17 No Config - Doesn't point to anything 2992 * 18 No Config - Doesn't point to anything 2993 * 19 No Config - Doesn't point to anything 2994 * 20 No Config - Doesn't point to anything 2995 * 21 No Config - Doesn't point to anything 2996 * 22 No Config - Doesn't point to anything 2997 * 23 No Config - Doesn't point to anything 2998 * ----------------------------------------------------------- 2999 */ 3000 3001 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3002 { 3003 u32 addr = 0; 3004 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3005 3006 switch (ce) { 3007 case 0: 3008 addr = SHADOW_VALUE0; 3009 break; 3010 case 3: 3011 addr = SHADOW_VALUE3; 3012 break; 3013 case 4: 3014 addr = SHADOW_VALUE4; 3015 break; 3016 case 5: 3017 addr = SHADOW_VALUE5; 3018 break; 3019 case 7: 3020 addr = SHADOW_VALUE7; 3021 break; 3022 default: 3023 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3024 QDF_ASSERT(0); 3025 } 3026 return addr; 3027 3028 } 3029 3030 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3031 { 3032 u32 addr = 0; 3033 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3034 3035 switch (ce) { 3036 case 1: 3037 addr = SHADOW_VALUE13; 3038 break; 3039 case 2: 3040 addr = SHADOW_VALUE14; 3041 break; 3042 case 5: 3043 addr = SHADOW_VALUE17; 3044 break; 3045 case 7: 3046 addr = SHADOW_VALUE19; 3047 break; 3048 case 8: 3049 addr = SHADOW_VALUE20; 3050 break; 3051 case 9: 3052 addr = SHADOW_VALUE21; 3053 break; 3054 case 10: 3055 addr = SHADOW_VALUE22; 3056 break; 3057 case 11: 3058 addr = SHADOW_VALUE23; 3059 break; 3060 default: 3061 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3062 QDF_ASSERT(0); 3063 } 3064 3065 return addr; 3066 3067 } 3068 #endif 3069 3070 #if defined(FEATURE_LRO) 3071 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 3072 { 3073 struct CE_state *ce_state; 3074 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3075 3076 ce_state = scn->ce_id_to_state[ctx_id]; 3077 3078 return ce_state->lro_data; 3079 } 3080 #endif 3081 3082 /** 3083 * hif_map_service_to_pipe() - returns the ce ids pertaining to 3084 * this service 3085 * @scn: hif_softc pointer. 3086 * @svc_id: Service ID for which the mapping is needed. 3087 * @ul_pipe: address of the container in which ul pipe is returned. 3088 * @dl_pipe: address of the container in which dl pipe is returned. 3089 * @ul_is_polled: address of the container in which a bool 3090 * indicating if the UL CE for this service 3091 * is polled is returned. 3092 * @dl_is_polled: address of the container in which a bool 3093 * indicating if the DL CE for this service 3094 * is polled is returned. 3095 * 3096 * Return: Indicates whether the service has been found in the table. 
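 * A minimal caller sketch (the same pattern hif_get_wake_ce_id() uses
 * at the end of this file):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *
 *	status = hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *					 &ul_pipe, &dl_pipe,
 *					 &ul_is_polled, &dl_is_polled);
 *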
3097 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 3098 * There will be warning logs if either leg has not been updated 3099 * because it missed the entry in the table (but this is not an err). 3100 */ 3101 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 3102 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 3103 int *dl_is_polled) 3104 { 3105 int status = QDF_STATUS_E_INVAL; 3106 unsigned int i; 3107 struct service_to_pipe element; 3108 struct service_to_pipe *tgt_svc_map_to_use; 3109 uint32_t sz_tgt_svc_map_to_use; 3110 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3111 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3112 bool dl_updated = false; 3113 bool ul_updated = false; 3114 3115 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 3116 &sz_tgt_svc_map_to_use); 3117 3118 *dl_is_polled = 0; /* polling for received messages not supported */ 3119 3120 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 3121 3122 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 3123 if (element.service_id == svc_id) { 3124 if (element.pipedir == PIPEDIR_OUT) { 3125 *ul_pipe = element.pipenum; 3126 *ul_is_polled = 3127 (hif_state->host_ce_config[*ul_pipe].flags & 3128 CE_ATTR_DISABLE_INTR) != 0; 3129 ul_updated = true; 3130 } else if (element.pipedir == PIPEDIR_IN) { 3131 *dl_pipe = element.pipenum; 3132 dl_updated = true; 3133 } 3134 status = QDF_STATUS_SUCCESS; 3135 } 3136 } 3137 if (ul_updated == false) 3138 HIF_INFO("%s: ul pipe is NOT updated for service %d", 3139 __func__, svc_id); 3140 if (dl_updated == false) 3141 HIF_INFO("%s: dl pipe is NOT updated for service %d", 3142 __func__, svc_id); 3143 3144 return status; 3145 } 3146 3147 #ifdef SHADOW_REG_DEBUG 3148 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 3149 uint32_t CE_ctrl_addr) 3150 { 3151 uint32_t read_from_hw, srri_from_ddr = 0; 3152 3153 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 3154 3155 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3156 3157 if (read_from_hw != srri_from_ddr) { 3158 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3159 __func__, srri_from_ddr, read_from_hw, 3160 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3161 QDF_ASSERT(0); 3162 } 3163 return srri_from_ddr; 3164 } 3165 3166 3167 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 3168 uint32_t CE_ctrl_addr) 3169 { 3170 uint32_t read_from_hw, drri_from_ddr = 0; 3171 3172 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 3173 3174 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3175 3176 if (read_from_hw != drri_from_ddr) { 3177 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3178 drri_from_ddr, read_from_hw, 3179 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3180 QDF_ASSERT(0); 3181 } 3182 return drri_from_ddr; 3183 } 3184 3185 #endif 3186 3187 #ifdef ADRASTEA_RRI_ON_DDR 3188 /** 3189 * hif_get_src_ring_read_index(): Called to get the SRRI 3190 * 3191 * @scn: hif_softc pointer 3192 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3193 * 3194 * This function returns the SRRI to the caller. 
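(SRRI is the source ring read index; the DRRI variant below reads the destination ring read index.)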
For CEs that 3195 * don't have interrupts enabled, we look at the DDR based SRRI 3196 * 3197 * Return: SRRI 3198 */ 3199 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, 3200 uint32_t CE_ctrl_addr) 3201 { 3202 struct CE_attr attr; 3203 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3204 3205 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3206 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3207 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3208 } else { 3209 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3210 return A_TARGET_READ(scn, 3211 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); 3212 else 3213 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, 3214 CE_ctrl_addr); 3215 } 3216 } 3217 3218 /** 3219 * hif_get_dst_ring_read_index(): Called to get the DRRI 3220 * 3221 * @scn: hif_softc pointer 3222 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3223 * 3224 * This function returns the DRRI to the caller. For CEs that 3225 * don't have interrupts enabled, we look at the DDR based DRRI 3226 * 3227 * Return: DRRI 3228 */ 3229 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, 3230 uint32_t CE_ctrl_addr) 3231 { 3232 struct CE_attr attr; 3233 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3234 3235 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3236 3237 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3238 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3239 } else { 3240 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3241 return A_TARGET_READ(scn, 3242 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); 3243 else 3244 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, 3245 CE_ctrl_addr); 3246 } 3247 } 3248 3249 /** 3250 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3251 * 3252 * @scn: hif_softc pointer 3253 * 3254 * This function allocates non-cached memory on DDR and sends 3255 * the physical address of this memory to the CE hardware. The 3256 * hardware updates the RRI on this particular location. 3257 * 3258 * Return: None 3259 */ 3260 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3261 { 3262 unsigned int i; 3263 qdf_dma_addr_t paddr_rri_on_ddr; 3264 uint32_t high_paddr, low_paddr; 3265 3266 scn->vaddr_rri_on_ddr = 3267 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 3268 scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)), 3269 &paddr_rri_on_ddr); 3270 3271 scn->paddr_rri_on_ddr = paddr_rri_on_ddr; 3272 low_paddr = BITS0_TO_31(paddr_rri_on_ddr); 3273 high_paddr = BITS32_TO_35(paddr_rri_on_ddr); 3274 3275 HIF_DBG("%s using srri and drri from DDR", __func__); 3276 3277 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); 3278 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); 3279 3280 for (i = 0; i < CE_COUNT; i++) 3281 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); 3282 3283 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t)); 3284 3285 } 3286 #else 3287 3288 /** 3289 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3290 * 3291 * @scn: hif_softc pointer 3292 * 3293 * This is a dummy implementation for platforms that don't 3294 * support this functionality. 3295 * 3296 * Return: None 3297 */ 3298 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3299 { 3300 } 3301 #endif 3302 3303 /** 3304 * hif_dump_ce_registers() - dump ce registers 3305 * @scn: hif_softc pointer.
3306 * 3307 * Output the copy engine registers 3308 * 3309 * Return: 0 for success or error code 3310 */ 3311 int hif_dump_ce_registers(struct hif_softc *scn) 3312 { 3313 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3314 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 3315 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 3316 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 3317 uint16_t i; 3318 QDF_STATUS status; 3319 3320 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 3321 if (scn->ce_id_to_state[i] == NULL) { 3322 HIF_DBG("CE%d not used.", i); 3323 continue; 3324 } 3325 3326 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 3327 (uint8_t *) &ce_reg_values[0], 3328 ce_reg_word_size * sizeof(uint32_t)); 3329 3330 if (status != QDF_STATUS_SUCCESS) { 3331 HIF_ERROR("Dumping CE register failed!"); 3332 return -EACCES; 3333 } 3334 HIF_ERROR("CE%d=>\n", i); 3335 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 3336 (uint8_t *) &ce_reg_values[0], 3337 ce_reg_word_size * sizeof(uint32_t)); 3338 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address 3339 + SR_WR_INDEX_ADDRESS), 3340 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 3341 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address 3342 + CURRENT_SRRI_ADDRESS), 3343 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 3344 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address 3345 + DST_WR_INDEX_ADDRESS), 3346 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 3347 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address 3348 + CURRENT_DRRI_ADDRESS), 3349 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 3350 qdf_print("---\n"); 3351 } 3352 return 0; 3353 } 3354 qdf_export_symbol(hif_dump_ce_registers); 3355 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 3356 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 3357 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 3358 { 3359 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3360 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3361 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 3362 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3363 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3364 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 3365 struct CE_ring_state *src_ring = ce_state->src_ring; 3366 struct CE_ring_state *dest_ring = ce_state->dest_ring; 3367 3368 if (src_ring) { 3369 hif_info->ul_pipe.nentries = src_ring->nentries; 3370 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 3371 hif_info->ul_pipe.sw_index = src_ring->sw_index; 3372 hif_info->ul_pipe.write_index = src_ring->write_index; 3373 hif_info->ul_pipe.hw_index = src_ring->hw_index; 3374 hif_info->ul_pipe.base_addr_CE_space = 3375 src_ring->base_addr_CE_space; 3376 hif_info->ul_pipe.base_addr_owner_space = 3377 src_ring->base_addr_owner_space; 3378 } 3379 3380 3381 if (dest_ring) { 3382 hif_info->dl_pipe.nentries = dest_ring->nentries; 3383 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; 3384 hif_info->dl_pipe.sw_index = dest_ring->sw_index; 3385 hif_info->dl_pipe.write_index = dest_ring->write_index; 3386 hif_info->dl_pipe.hw_index = dest_ring->hw_index; 3387 hif_info->dl_pipe.base_addr_CE_space = 3388 dest_ring->base_addr_CE_space; 3389 hif_info->dl_pipe.base_addr_owner_space = 3390 dest_ring->base_addr_owner_space; 3391 } 3392 3393 hif_info->pci_mem = pci_resource_start(sc->pdev, 0); 3394 hif_info->ctrl_addr = ce_state->ctrl_addr; 3395 3396 return hif_info; 3397 } 3398 qdf_export_symbol(hif_get_addl_pipe_info); 3399 3400 uint32_t 
hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) 3401 { 3402 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3403 3404 scn->nss_wifi_ol_mode = mode; 3405 return 0; 3406 } 3407 qdf_export_symbol(hif_set_nss_wifiol_mode); 3408 #endif 3409 3410 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) 3411 { 3412 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3413 scn->hif_attribute = hif_attrib; 3414 } 3415 3416 3417 /* disable interrupts (only applicable for legacy copy engine currently) */ 3418 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) 3419 { 3420 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3421 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; 3422 uint32_t ctrl_addr = CE_state->ctrl_addr; 3423 3424 Q_TARGET_ACCESS_BEGIN(scn); 3425 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 3426 Q_TARGET_ACCESS_END(scn); 3427 } 3428 qdf_export_symbol(hif_disable_interrupt); 3429 3430 /** 3431 * hif_fw_event_handler() - hif fw event handler 3432 * @hif_state: pointer to hif ce state structure 3433 * 3434 * Process fw events and raise HTC callback to process fw events. 3435 * 3436 * Return: none 3437 */ 3438 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) 3439 { 3440 struct hif_msg_callbacks *msg_callbacks = 3441 &hif_state->msg_callbacks_current; 3442 3443 if (!msg_callbacks->fwEventHandler) 3444 return; 3445 3446 msg_callbacks->fwEventHandler(msg_callbacks->Context, 3447 QDF_STATUS_E_FAILURE); 3448 } 3449 3450 #ifndef QCA_WIFI_3_0 3451 /** 3452 * hif_fw_interrupt_handler() - FW interrupt handler 3453 * @irq: irq number 3454 * @arg: the user pointer 3455 * 3456 * Called from the PCI interrupt handler when the Target raises a 3457 * firmware-generated interrupt to the Host. 3458 * 3459 * only registered for legacy ce devices 3460 * 3461 * Return: status of handled irq 3462 */ 3463 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3464 { 3465 struct hif_softc *scn = arg; 3466 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3467 uint32_t fw_indicator_address, fw_indicator; 3468 3469 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 3470 return ATH_ISR_NOSCHED; 3471 3472 fw_indicator_address = hif_state->fw_indicator_address; 3473 /* For sudden unplug this will return ~0 */ 3474 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 3475 3476 if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { 3477 /* ACK: clear Target-side pending event */ 3478 A_TARGET_WRITE(scn, fw_indicator_address, 3479 fw_indicator & ~FW_IND_EVENT_PENDING); 3480 if (Q_TARGET_ACCESS_END(scn) < 0) 3481 return ATH_ISR_SCHED; 3482 3483 if (hif_state->started) { 3484 hif_fw_event_handler(hif_state); 3485 } else { 3486 /* 3487 * Probable Target failure before we're prepared 3488 * to handle it. Generally unexpected.
3489 * fw_indicator used as bitmap, and defined as below: 3490 * FW_IND_EVENT_PENDING 0x1 3491 * FW_IND_INITIALIZED 0x2 3492 * FW_IND_NEEDRECOVER 0x4 3493 */ 3494 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 3495 ("%s: Early firmware event indicated 0x%x\n", 3496 __func__, fw_indicator)); 3497 } 3498 } else { 3499 if (Q_TARGET_ACCESS_END(scn) < 0) 3500 return ATH_ISR_SCHED; 3501 } 3502 3503 return ATH_ISR_SCHED; 3504 } 3505 #else 3506 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3507 { 3508 return ATH_ISR_SCHED; 3509 } 3510 #endif /* #ifdef QCA_WIFI_3_0 */ 3511 3512 3513 /** 3514 * hif_wlan_disable(): call the platform driver to disable wlan 3515 * @scn: HIF Context 3516 * 3517 * This function passes the con_mode to platform driver to disable 3518 * wlan. 3519 * 3520 * Return: void 3521 */ 3522 void hif_wlan_disable(struct hif_softc *scn) 3523 { 3524 enum pld_driver_mode mode; 3525 uint32_t con_mode = hif_get_conparam(scn); 3526 3527 if (scn->target_status == TARGET_STATUS_RESET) 3528 return; 3529 3530 if (QDF_GLOBAL_FTM_MODE == con_mode) 3531 mode = PLD_FTM; 3532 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3533 mode = PLD_EPPING; 3534 else 3535 mode = PLD_MISSION; 3536 3537 pld_wlan_disable(scn->qdf_dev->dev, mode); 3538 } 3539 3540 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) 3541 { 3542 QDF_STATUS status; 3543 uint8_t ul_pipe, dl_pipe; 3544 int ul_is_polled, dl_is_polled; 3545 3546 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ 3547 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 3548 HTC_CTRL_RSVD_SVC, 3549 &ul_pipe, &dl_pipe, 3550 &ul_is_polled, &dl_is_polled); 3551 if (status) { 3552 HIF_ERROR("%s: failed to map pipe: %d", __func__, status); 3553 return qdf_status_to_os_return(status); 3554 } 3555 3556 *ce_id = dl_pipe; 3557 3558 return 0; 3559 } 3560
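/*
 * Usage sketch (hypothetical caller, for illustration only): the wake
 * CE id resolved above is the DL pipe of HTC_CTRL_RSVD_SVC, so a bus
 * layer could tag the matching interrupt as wake capable:
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		mark_ce_irq_wake_capable(scn, wake_ce_id);
 *
 * mark_ce_irq_wake_capable() is a hypothetical helper named here for
 * illustration; it is not an API of this file.
 */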