/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}
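
/**
 * ce_poll_timeout() - CE_ATTR_ENABLE_POLL timer callback
 * @arg: opaque pointer to the CE_state being polled
 *
 * Services the copy engine and re-arms the poll timer for another
 * CE_POLL_TIMEOUT ms, as long as polling is still enabled
 * (timer_inited is cleared by ce_disable_polling()).
 */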
static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}
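
/**
 * roundup_pwr2() - round a ring size up to the next power of two
 * @n: requested number of entries
 *
 * Ring sizes must be powers of two so that index wrap-around can be done
 * with a simple mask (nentries - 1). A power of two has exactly one bit
 * set, so (n & (n - 1)) == 0 tests for it directly; e.g. n = 24 rounds
 * up to 32.
 */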
static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
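
/*
 * Each entry below pairs a CE id with the offset of the write index
 * register shadowed for it: source rings (host -> target) shadow
 * ADRASTEA_SRC_WR_INDEX_OFFSET, destination rings (target -> host)
 * shadow ADRASTEA_DST_WR_INDEX_OFFSET.
 */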
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This table defines Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
 ============================================================================
 Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
            |                      |      |      | Size     | Frequency
 ============================================================================
 tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
 descriptor |                      |      |      | O(100B)  | and regular
 download   |                      |      |      |          |
 ----------------------------------------------------------------------------
 rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
 indication |                      |      |      | O(10B)   | regular
 upload     |                      |      |      |          |
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
 upload     |                      |      |      | O(1000B) | (frequent
 e.g. noise |                      |      |      |          | during IP1.0
 packets    |                      |      |      |          | testing)
 ----------------------------------------------------------------------------
 MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
 download   |                      |      |      | O(1000B) | (frequent
 e.g.       |                      |      |      |          | during IP1.0
 misdirected|                      |      |      |          | testing)
 EAPOL      |                      |      |      |          |
 packets    |                      |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
            | DATA_VO (uplink)     |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
            | DATA_VO (downlink)   |      |      |          |
 ----------------------------------------------------------------------------
 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
            |                      |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
 messages   | (downlink)           |      |      | O(100B)  |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (uplink)             |      |      |          |
 ----------------------------------------------------------------------------
 n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
            | HTC_RAW_STREAMS      |      |      |          |
            | (downlink)           |      |      |          |
 ----------------------------------------------------------------------------
 diag       | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
            |                      |      |      |          | infrequent
 ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
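/*
 * Each entry reads { service id, direction, CE/pipe number }; e.g. the
 * first entry below routes host -> target WMI_DATA_VO_SVC traffic
 * through CE 3, and the second routes the target -> host direction of
 * the same service through CE 2.
 */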
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC,    /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};
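
/**
 * hif_select_epping_service_to_pipe_map() - get the epping service map
 * @tgt_svc_map_to_use: [out] set to the epping service-to-pipe map
 * @sz_tgt_svc_map_to_use: [out] size of that map, in bytes
 */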
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif
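
/**
 * hif_select_service_to_pipe_map() - select the service map for a target
 * @scn: HIF context
 * @tgt_svc_map_to_use: [out] service-to-pipe map to use for this target
 * @sz_tgt_svc_map_to_use: [out] size of that map, in bytes
 *
 * Picks the epping map when epping mode is enabled; otherwise selects a
 * map based on the target type, defaulting to the wlan map.
 */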
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
					   struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data/htt_tx_data attribute of the state structure if
 * the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: CE in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
			nentries * desc_size + CE_DESC_RING_ALIGN);
		if (!scn->ipa_ce_ring) {
			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
				  __func__);
			return QDF_STATUS_E_NOMEM;
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
			scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						  CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: CE in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
		qdf_mem_shared_mem_free(scn->qdf_dev,
					scn->ipa_ce_ring);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
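/*
 * Without IPA offload there is no shared-memory CE, so both the alloc
 * and free paths use ordinary DMA-coherent allocations for every ring.
 */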
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
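
/**
 * ce_service_register_module() - register a CE service attach routine
 * @target_type: index into the attach table (e.g. CE_SVC_LEGACY, CE_SVC_SRNG)
 * @ce_attach: constructor that returns the ce_ops for this service type
 *
 * Stores @ce_attach in ce_attach_register[]; ce_services_attach() invokes
 * it later to obtain the ce_ops for the running target.
 */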
void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * Returns true if the target is SRNG based.
 *
 * Return: true for SRNG based targets, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
		return true;
	default:
		return false;
	}
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}
#else /* !QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}
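
/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring descriptor
 * @CE_state: CE this ring belongs to
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @nentries: number of entries (must already be a power of two)
 *
 * Allocates the CE_ring_state bookkeeping structure together with the
 * per-transfer context array, then allocates the DMA descriptor ring
 * itself and aligns both the CE-space and owner-space addresses to
 * CE_DESC_RING_ALIGN.
 *
 * Return: pointer to the ring state, or NULL on allocation failure
 */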
static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
						 uint8_t ring_type,
						 uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Initialize the ring memory to 0 to prevent stale data from
	 * crashing the system during firmware download
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			  CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
			ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
			ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}
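
/**
 * hif_ce_bus_early_suspend() - pause non-WMI copy engines for bus suspend
 * @scn: HIF context
 *
 * Marks every running CE except the WMI control pipes as CE_PAUSED so
 * that traffic other than control messages is quiesced before the bus
 * suspends.
 *
 * Return: 0 on success, error from hif_map_service_to_pipe otherwise
 */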
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
						 RESUME_WRITE_INDEX_UPDATE,
						 NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	    (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	    (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

/*
 * Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
 * checked here.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_history() - Allocate memory for storing the CE
 * descriptor history.
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
		qdf_mem_malloc(HIF_CE_HISTORY_MAX *
			       sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

/**
 * free_mem_ce_debug_history() - Free memory allocated for the CE
 * descriptor history.
 * @scn: hif scn handle
 * @CE_id: Copy Engine Id
 *
 * Return: None
 */
static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev =
		(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

#if HIF_CE_DEBUG_DATA_BUF
	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}
#endif
	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* !(defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF) */
static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
						    unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void free_mem_ce_debug_history(struct hif_softc *scn,
					     unsigned int CE_id)
{
}

static inline void reset_ce_debug_history(struct hif_softc *scn)
{
}
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
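
/**
 * ce_enable_polling() - enable the poll timer for a polled CE
 * @cestate: opaque pointer to the CE_state
 *
 * Only CEs created with CE_ATTR_ENABLE_POLL are affected; setting
 * timer_inited lets ce_poll_timeout() keep re-arming itself.
 */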
void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_State and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}

	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_State and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/*Allocation failed. Cleanup*/
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						       CE_id,
						       CE_state->status_ring,
						       attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_WAKE_APPS);
				ce_enable_polling(CE_state);
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_is_polled_mode_enabled() - API to query if polling is enabled on all CEs
 * @hif_ctx: HIF Context
 *
 * API to check if polling is enabled on all CEs. Returns true when polling
 * is enabled on all CEs.
 *
 * Return: bool
 */
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *attr;
	int id;

	for (id = 0; id < scn->ce_count; id++) {
		attr = &hif_state->host_ce_config[id];
		if (attr && (attr->dest_nentries) &&
		    !(attr->flags & CE_ATTR_ENABLE_POLL))
			return false;
	}
	return true;
}
qdf_export_symbol(hif_is_polled_mode_enabled);

/**
 * hif_get_ce_handle() - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle as an opaque void pointer
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
 * @ce_hdl: copy engine handle
 *
 * No processing is required inside this function; using an assert, it
 * makes sure that the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * In fastpath mode the datapath CEs are kept completely full:
	 * unlike other CEs, no blank entry is left to distinguish an
	 * empty queue from a full one. So free all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case where all the message buffers are
 * reused. Hence every entry in the pipe has to be posted, even at the
 * beginning, unlike other CE pipes where one less than dest_nentries is
 * filled initially.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}

static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */
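
/**
 * ce_fini() - tear down a copy engine and free its rings
 * @copyeng: CE handle returned by ce_init()
 *
 * Stops polling, detaches the CE from the scn, cleans up any fastpath
 * buffers still on the rings, and frees the source, destination and
 * (for SRNG targets) status rings along with the CE state itself.
 */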
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	ce_disable_polling(CE_state);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}

/* Send the first nbytes bytes of the buffer */
QDF_STATUS
hif_send_head(struct hif_opaque_softc *hif_ctx,
	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
	      qdf_nbuf_t nbuf, unsigned int data_attr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	int bytes = nbytes, nfrags = 0;
	struct ce_sendlist sendlist;
	int status, i = 0;
	unsigned int mux_id = 0;

	if (nbytes > qdf_nbuf_len(nbuf)) {
		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
			  (uint32_t)qdf_nbuf_len(nbuf));
		QDF_ASSERT(0);
	}

	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ? 0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}
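
/**
 * hif_send_complete_check() - poll a pipe for send completions
 * @hif_ctx: HIF context
 * @pipe: pipe (CE) number to check
 * @force: when false, skip the relatively expensive CE register read
 *         if at least half of the pipe's send resources are still free
 */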
1882 ce_sendlist_init(&sendlist); 1883 do { 1884 qdf_dma_addr_t frag_paddr; 1885 int frag_bytes; 1886 1887 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); 1888 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); 1889 /* 1890 * Clear the packet offset for all but the first CE desc. 1891 */ 1892 if (i++ > 0) 1893 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; 1894 1895 status = ce_sendlist_buf_add(&sendlist, frag_paddr, 1896 frag_bytes > 1897 bytes ? bytes : frag_bytes, 1898 qdf_nbuf_get_frag_is_wordstream 1899 (nbuf, 1900 nfrags) ? 0 : 1901 CE_SEND_FLAG_SWAP_DISABLE, 1902 data_attr); 1903 if (status != QDF_STATUS_SUCCESS) { 1904 HIF_ERROR("%s: error, frag_num %d larger than limit", 1905 __func__, nfrags); 1906 return status; 1907 } 1908 bytes -= frag_bytes; 1909 nfrags++; 1910 } while (bytes > 0); 1911 1912 /* Make sure we have resources to handle this request */ 1913 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 1914 if (pipe_info->num_sends_allowed < nfrags) { 1915 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 1916 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); 1917 return QDF_STATUS_E_RESOURCES; 1918 } 1919 pipe_info->num_sends_allowed -= nfrags; 1920 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 1921 1922 if (qdf_unlikely(ce_hdl == NULL)) { 1923 HIF_ERROR("%s: error CE handle is null", __func__); 1924 return QDF_STATUS_E_FAILURE; 1925 } 1926 1927 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); 1928 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, 1929 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), 1930 sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); 1931 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); 1932 QDF_ASSERT(status == QDF_STATUS_SUCCESS); 1933 1934 return status; 1935 } 1936 1937 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, 1938 int force) 1939 { 1940 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1941 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1942 1943 if (!force) { 1944 int resources; 1945 /* 1946 * Decide whether to actually poll for completions, or just 1947 * wait for a later chance. If there seem to be plenty of 1948 * resources left, then just wait, since checking involves 1949 * reading a CE register, which is a relatively expensive 1950 * operation. 1951 */ 1952 resources = hif_get_free_queue_number(hif_ctx, pipe); 1953 /* 1954 * If at least 50% of the total resources are still available, 1955 * don't bother checking again yet. 1956 */ 1957 if (resources > (hif_state->host_ce_config[pipe].src_nentries >> 1958 1)) 1959 return; 1960 } 1961 #if ATH_11AC_TXCOMPACT 1962 ce_per_engine_servicereap(scn, pipe); 1963 #else 1964 ce_per_engine_service(scn, pipe); 1965 #endif 1966 } 1967 1968 uint16_t 1969 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe) 1970 { 1971 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1972 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 1973 uint16_t rv; 1974 1975 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 1976 rv = pipe_info->num_sends_allowed; 1977 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 1978 return rv; 1979 } 1980 1981 /* Called by lower (CE) layer when a send to Target completes. */ 1982 static void 1983 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context, 1984 void *transfer_context, qdf_dma_addr_t CE_data, 1985 unsigned int nbytes, unsigned int transfer_id, 1986 unsigned int sw_index, unsigned int hw_index, 1987 unsigned int toeplitz_hash_result) 1988 { 1989 struct HIF_CE_pipe_info *pipe_info = 1990 (struct HIF_CE_pipe_info *)ce_context; 1991 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; 1992 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 1993 unsigned int sw_idx = sw_index, hw_idx = hw_index; 1994 struct hif_msg_callbacks *msg_callbacks = 1995 &pipe_info->pipe_callbacks; 1996 1997 do { 1998 /* 1999 * The upper layer callback will be triggered 2000 * when the last fragment is completed.
2001 */ 2002 if (transfer_context != CE_SENDLIST_ITEM_CTXT) { 2003 if (scn->target_status == TARGET_STATUS_RESET) { 2004 2005 qdf_nbuf_unmap_single(scn->qdf_dev, 2006 transfer_context, 2007 QDF_DMA_TO_DEVICE); 2008 qdf_nbuf_free(transfer_context); 2009 } else 2010 msg_callbacks->txCompletionHandler( 2011 msg_callbacks->Context, 2012 transfer_context, transfer_id, 2013 toeplitz_hash_result); 2014 } 2015 2016 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2017 pipe_info->num_sends_allowed++; 2018 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2019 } while (ce_completed_send_next(copyeng, 2020 &ce_context, &transfer_context, 2021 &CE_data, &nbytes, &transfer_id, 2022 &sw_idx, &hw_idx, 2023 &toeplitz_hash_result) == QDF_STATUS_SUCCESS); 2024 } 2025 2026 /** 2027 * hif_ce_do_recv(): send message from copy engine to upper layers 2028 * @msg_callbacks: structure containing callback and callback context 2029 * @netbuf: skb containing message 2030 * @nbytes: number of bytes in the message 2031 * @pipe_info: used for the pipe_number info 2032 * 2033 * Checks the packet length, configures the length in the netbuf, 2034 * and calls the upper layer callback. 2035 * 2036 * Return: None 2037 */ 2038 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, 2039 qdf_nbuf_t netbuf, int nbytes, 2040 struct HIF_CE_pipe_info *pipe_info) { 2041 if (nbytes <= pipe_info->buf_sz) { 2042 qdf_nbuf_set_pktlen(netbuf, nbytes); 2043 msg_callbacks-> 2044 rxCompletionHandler(msg_callbacks->Context, 2045 netbuf, pipe_info->pipe_num); 2046 } else { 2047 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d", 2048 __func__, netbuf, nbytes); 2049 2050 qdf_nbuf_free(netbuf); 2051 } 2052 } 2053 2054 /* Called by lower (CE) layer when data is received from the Target.
*/ 2055 static void 2056 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context, 2057 void *transfer_context, qdf_dma_addr_t CE_data, 2058 unsigned int nbytes, unsigned int transfer_id, 2059 unsigned int flags) 2060 { 2061 struct HIF_CE_pipe_info *pipe_info = 2062 (struct HIF_CE_pipe_info *)ce_context; 2063 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; 2064 struct CE_state *ce_state = (struct CE_state *) copyeng; 2065 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2066 #ifdef HIF_PCI 2067 struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state); 2068 #endif 2069 struct hif_msg_callbacks *msg_callbacks = 2070 &pipe_info->pipe_callbacks; 2071 2072 do { 2073 #ifdef HIF_PCI 2074 hif_pm_runtime_mark_last_busy(hif_pci_sc->dev); 2075 #endif 2076 qdf_nbuf_unmap_single(scn->qdf_dev, 2077 (qdf_nbuf_t) transfer_context, 2078 QDF_DMA_FROM_DEVICE); 2079 2080 atomic_inc(&pipe_info->recv_bufs_needed); 2081 hif_post_recv_buffers_for_pipe(pipe_info); 2082 if (scn->target_status == TARGET_STATUS_RESET) 2083 qdf_nbuf_free(transfer_context); 2084 else 2085 hif_ce_do_recv(msg_callbacks, transfer_context, 2086 nbytes, pipe_info); 2087 2088 /* Set the force_break flag if the number of receives 2089 * reaches MAX_NUM_OF_RECEIVES 2090 */ 2091 ce_state->receive_count++; 2092 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { 2093 ce_state->force_break = 1; 2094 break; 2095 } 2096 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context, 2097 &CE_data, &nbytes, &transfer_id, 2098 &flags) == QDF_STATUS_SUCCESS); 2099 2100 } 2101 2102 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */ 2103 2104 void 2105 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused, 2106 struct hif_msg_callbacks *callbacks) 2107 { 2108 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2109 2110 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 2111 spin_lock_init(&pcie_access_log_lock); 2112 #endif 2113 /* Save callbacks for later installation */ 2114 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks, 2115 sizeof(hif_state->msg_callbacks_pending)); 2116 2117 } 2118 2119 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) 2120 { 2121 struct CE_handle *ce_diag = hif_state->ce_diag; 2122 int pipe_num; 2123 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2124 struct hif_msg_callbacks *hif_msg_callbacks = 2125 &hif_state->msg_callbacks_current; 2126 2127 /* daemonize("hif_compl_thread"); */ 2128 2129 if (scn->ce_count == 0) { 2130 HIF_ERROR("%s: Invalid ce_count", __func__); 2131 return -EINVAL; 2132 } 2133 2134 if (!hif_msg_callbacks || 2135 !hif_msg_callbacks->rxCompletionHandler || 2136 !hif_msg_callbacks->txCompletionHandler) { 2137 HIF_ERROR("%s: no completion handler registered", __func__); 2138 return -EFAULT; 2139 } 2140 2141 A_TARGET_ACCESS_LIKELY(scn); 2142 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2143 struct CE_attr attr; 2144 struct HIF_CE_pipe_info *pipe_info; 2145 2146 pipe_info = &hif_state->pipe_info[pipe_num]; 2147 if (pipe_info->ce_hdl == ce_diag) 2148 continue; /* Handle Diagnostic CE specially */ 2149 attr = hif_state->host_ce_config[pipe_num]; 2150 if (attr.src_nentries) { 2151 /* pipe used to send to target */ 2152 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK", 2153 __func__, pipe_num, pipe_info); 2154 ce_send_cb_register(pipe_info->ce_hdl, 2155 hif_pci_ce_send_done, pipe_info, 2156 attr.flags & CE_ATTR_DISABLE_INTR); 2157 pipe_info->num_sends_allowed = attr.src_nentries - 1; 2158 } 2159 if
(attr.dest_nentries) { 2160 /* pipe used to receive from target */ 2161 ce_recv_cb_register(pipe_info->ce_hdl, 2162 hif_pci_ce_recv_data, pipe_info, 2163 attr.flags & CE_ATTR_DISABLE_INTR); 2164 } 2165 2166 if (attr.src_nentries) 2167 qdf_spinlock_create(&pipe_info->completion_freeq_lock); 2168 2169 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 2170 sizeof(pipe_info->pipe_callbacks)); 2171 } 2172 2173 A_TARGET_ACCESS_UNLIKELY(scn); 2174 return 0; 2175 } 2176 2177 /* 2178 * Install pending msg callbacks. 2179 * 2180 * TBDXXX: This hack is needed because upper layers install msg callbacks 2181 * for use with HTC before BMI is done; yet this HIF implementation 2182 * needs to continue to use BMI msg callbacks. Really, upper layers 2183 * should not register HTC callbacks until AFTER BMI phase. 2184 */ 2185 static void hif_msg_callbacks_install(struct hif_softc *scn) 2186 { 2187 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2188 2189 qdf_mem_copy(&hif_state->msg_callbacks_current, 2190 &hif_state->msg_callbacks_pending, 2191 sizeof(hif_state->msg_callbacks_pending)); 2192 } 2193 2194 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 2195 uint8_t *DLPipe) 2196 { 2197 int ul_is_polled, dl_is_polled; 2198 2199 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 2200 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 2201 } 2202 2203 /** 2204 * hif_dump_pipe_debug_count() - Log error count 2205 * @scn: hif_softc pointer. 2206 * 2207 * Output the pipe error counts of each pipe to log file 2208 * 2209 * Return: N/A 2210 */ 2211 void hif_dump_pipe_debug_count(struct hif_softc *scn) 2212 { 2213 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2214 int pipe_num; 2215 2216 if (hif_state == NULL) { 2217 HIF_ERROR("%s hif_state is NULL", __func__); 2218 return; 2219 } 2220 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2221 struct HIF_CE_pipe_info *pipe_info; 2222 2223 pipe_info = &hif_state->pipe_info[pipe_num]; 2224 2225 if (pipe_info->nbuf_alloc_err_count > 0 || 2226 pipe_info->nbuf_dma_err_count > 0 || 2227 pipe_info->nbuf_ce_enqueue_err_count) 2228 HIF_ERROR( 2229 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 2230 __func__, pipe_info->pipe_num, 2231 atomic_read(&pipe_info->recv_bufs_needed), 2232 pipe_info->nbuf_alloc_err_count, 2233 pipe_info->nbuf_dma_err_count, 2234 pipe_info->nbuf_ce_enqueue_err_count); 2235 } 2236 } 2237 2238 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 2239 void *nbuf, uint32_t *error_cnt, 2240 enum hif_ce_event_type failure_type, 2241 const char *failure_type_string) 2242 { 2243 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 2244 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 2245 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2246 int ce_id = CE_state->id; 2247 uint32_t error_cnt_tmp; 2248 2249 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2250 error_cnt_tmp = ++(*error_cnt); 2251 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2252 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", 2253 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 2254 failure_type_string); 2255 hif_record_ce_desc_event(scn, ce_id, failure_type, 2256 NULL, nbuf, bufs_needed_tmp, 0); 2257 /* if we fail to allocate the last buffer for an rx pipe, 2258 * there is no trigger to refill the ce and we will 2259 * eventually 
crash 2260 */ 2261 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) 2262 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 2263 2264 } 2265 2266 2267 2268 2269 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 2270 { 2271 struct CE_handle *ce_hdl; 2272 qdf_size_t buf_sz; 2273 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2274 QDF_STATUS status; 2275 uint32_t bufs_posted = 0; 2276 2277 buf_sz = pipe_info->buf_sz; 2278 if (buf_sz == 0) { 2279 /* Unused Copy Engine */ 2280 return QDF_STATUS_SUCCESS; 2281 } 2282 2283 ce_hdl = pipe_info->ce_hdl; 2284 2285 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2286 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 2287 qdf_dma_addr_t CE_data; /* CE space buffer address */ 2288 qdf_nbuf_t nbuf; 2289 2290 atomic_dec(&pipe_info->recv_bufs_needed); 2291 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2292 2293 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 2294 if (!nbuf) { 2295 hif_post_recv_buffers_failure(pipe_info, nbuf, 2296 &pipe_info->nbuf_alloc_err_count, 2297 HIF_RX_NBUF_ALLOC_FAILURE, 2298 "HIF_RX_NBUF_ALLOC_FAILURE"); 2299 return QDF_STATUS_E_NOMEM; 2300 } 2301 2302 /* 2303 * qdf_nbuf_peek_header(nbuf, &data, &unused); 2304 * CE_data = dma_map_single(dev, data, buf_sz, ); 2305 * DMA_FROM_DEVICE); 2306 */ 2307 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 2308 QDF_DMA_FROM_DEVICE); 2309 2310 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2311 hif_post_recv_buffers_failure(pipe_info, nbuf, 2312 &pipe_info->nbuf_dma_err_count, 2313 HIF_RX_NBUF_MAP_FAILURE, 2314 "HIF_RX_NBUF_MAP_FAILURE"); 2315 qdf_nbuf_free(nbuf); 2316 return status; 2317 } 2318 2319 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 2320 2321 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 2322 buf_sz, DMA_FROM_DEVICE); 2323 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 2324 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2325 hif_post_recv_buffers_failure(pipe_info, nbuf, 2326 &pipe_info->nbuf_ce_enqueue_err_count, 2327 HIF_RX_NBUF_ENQUEUE_FAILURE, 2328 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 2329 2330 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 2331 QDF_DMA_FROM_DEVICE); 2332 qdf_nbuf_free(nbuf); 2333 return status; 2334 } 2335 2336 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2337 bufs_posted++; 2338 } 2339 pipe_info->nbuf_alloc_err_count = 2340 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 2341 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 2342 pipe_info->nbuf_dma_err_count = 2343 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 2344 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 2345 pipe_info->nbuf_ce_enqueue_err_count = 2346 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 2347 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 2348 2349 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2350 2351 return QDF_STATUS_SUCCESS; 2352 } 2353 2354 /* 2355 * Try to post all desired receive buffers for all pipes. 2356 * Returns 0 for non fastpath rx copy engine as 2357 * oom_allocation_work will be scheduled to recover any 2358 * failures, non-zero if unable to completely replenish 2359 * receive buffers for fastpath rx Copy engine. 
2360 */ 2361 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 2362 { 2363 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2364 int pipe_num; 2365 struct CE_state *ce_state = NULL; 2366 QDF_STATUS qdf_status; 2367 2368 A_TARGET_ACCESS_LIKELY(scn); 2369 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2370 struct HIF_CE_pipe_info *pipe_info; 2371 2372 ce_state = scn->ce_id_to_state[pipe_num]; 2373 pipe_info = &hif_state->pipe_info[pipe_num]; 2374 2375 if (hif_is_nss_wifi_enabled(scn) && 2376 ce_state && (ce_state->htt_rx_data)) 2377 continue; 2378 2379 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 2380 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 2381 ce_state->htt_rx_data && 2382 scn->fastpath_mode_on) { 2383 A_TARGET_ACCESS_UNLIKELY(scn); 2384 return qdf_status; 2385 } 2386 } 2387 2388 A_TARGET_ACCESS_UNLIKELY(scn); 2389 2390 return QDF_STATUS_SUCCESS; 2391 } 2392 2393 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 2394 { 2395 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2396 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2397 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 2398 2399 hif_update_fastpath_recv_bufs_cnt(scn); 2400 2401 hif_msg_callbacks_install(scn); 2402 2403 if (hif_completion_thread_startup(hif_state)) 2404 return QDF_STATUS_E_FAILURE; 2405 2406 /* enable buffer cleanup */ 2407 hif_state->started = true; 2408 2409 /* Post buffers once to start things off. */ 2410 qdf_status = hif_post_recv_buffers(scn); 2411 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2412 /* cleanup is done in hif_ce_disable */ 2413 HIF_ERROR("%s:failed to post buffers", __func__); 2414 return qdf_status; 2415 } 2416 2417 return qdf_status; 2418 } 2419 2420 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2421 { 2422 struct hif_softc *scn; 2423 struct CE_handle *ce_hdl; 2424 uint32_t buf_sz; 2425 struct HIF_CE_state *hif_state; 2426 qdf_nbuf_t netbuf; 2427 qdf_dma_addr_t CE_data; 2428 void *per_CE_context; 2429 2430 buf_sz = pipe_info->buf_sz; 2431 /* Unused Copy Engine */ 2432 if (buf_sz == 0) 2433 return; 2434 2435 2436 hif_state = pipe_info->HIF_CE_state; 2437 if (!hif_state->started) 2438 return; 2439 2440 scn = HIF_GET_SOFTC(hif_state); 2441 ce_hdl = pipe_info->ce_hdl; 2442 2443 if (scn->qdf_dev == NULL) 2444 return; 2445 while (ce_revoke_recv_next 2446 (ce_hdl, &per_CE_context, (void **)&netbuf, 2447 &CE_data) == QDF_STATUS_SUCCESS) { 2448 if (netbuf) { 2449 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 2450 QDF_DMA_FROM_DEVICE); 2451 qdf_nbuf_free(netbuf); 2452 } 2453 } 2454 } 2455 2456 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2457 { 2458 struct CE_handle *ce_hdl; 2459 struct HIF_CE_state *hif_state; 2460 struct hif_softc *scn; 2461 qdf_nbuf_t netbuf; 2462 void *per_CE_context; 2463 qdf_dma_addr_t CE_data; 2464 unsigned int nbytes; 2465 unsigned int id; 2466 uint32_t buf_sz; 2467 uint32_t toeplitz_hash_result; 2468 2469 buf_sz = pipe_info->buf_sz; 2470 if (buf_sz == 0) { 2471 /* Unused Copy Engine */ 2472 return; 2473 } 2474 2475 hif_state = pipe_info->HIF_CE_state; 2476 if (!hif_state->started) { 2477 return; 2478 } 2479 2480 scn = HIF_GET_SOFTC(hif_state); 2481 2482 ce_hdl = pipe_info->ce_hdl; 2483 2484 while (ce_cancel_send_next 2485 (ce_hdl, &per_CE_context, 2486 (void **)&netbuf, &CE_data, &nbytes, 2487 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 2488 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 2489 /* 2490 * Packets enqueued by htt_h2t_ver_req_msg() and 2491 * 
htt_h2t_rx_ring_cfg_msg_ll() have already been 2492 * freed in htt_htc_misc_pkt_pool_free() in 2493 * wlantl_close(), so do not free them here again; 2494 * the endpoint they are queued on is checked in 2495 * order to skip them. 2496 */ 2497 if (id == scn->htc_htt_tx_endpoint) 2498 return; 2499 /* Indicate the completion to higher 2500 * layer to free the buffer 2501 */ 2502 if (pipe_info->pipe_callbacks.txCompletionHandler) 2503 pipe_info->pipe_callbacks. 2504 txCompletionHandler(pipe_info-> 2505 pipe_callbacks.Context, 2506 netbuf, id, toeplitz_hash_result); 2507 } 2508 } 2509 } 2510 2511 /* 2512 * Cleanup residual buffers for device shutdown: 2513 * buffers that were enqueued for receive, and 2514 * buffers that were to be sent. 2515 * Note: Buffers that had completed but which were 2516 * not yet processed are on a completion queue. They 2517 * are handled when the completion thread shuts down. 2518 */ 2519 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 2520 { 2521 int pipe_num; 2522 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2523 struct CE_state *ce_state; 2524 2525 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2526 struct HIF_CE_pipe_info *pipe_info; 2527 2528 ce_state = scn->ce_id_to_state[pipe_num]; 2529 if (hif_is_nss_wifi_enabled(scn) && ce_state && 2530 ((ce_state->htt_tx_data) || 2531 (ce_state->htt_rx_data))) { 2532 continue; 2533 } 2534 2535 pipe_info = &hif_state->pipe_info[pipe_num]; 2536 hif_recv_buffer_cleanup_on_pipe(pipe_info); 2537 hif_send_buffer_cleanup_on_pipe(pipe_info); 2538 } 2539 } 2540 2541 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 2542 { 2543 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2544 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2545 2546 hif_buffer_cleanup(hif_state); 2547 } 2548 2549 static void hif_destroy_oom_work(struct hif_softc *scn) 2550 { 2551 struct CE_state *ce_state; 2552 int ce_id; 2553 2554 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 2555 ce_state = scn->ce_id_to_state[ce_id]; 2556 if (ce_state) 2557 qdf_destroy_work(scn->qdf_dev, 2558 &ce_state->oom_allocation_work); 2559 } 2560 } 2561 2562 void hif_ce_stop(struct hif_softc *scn) 2563 { 2564 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2565 int pipe_num; 2566 2567 /* 2568 * before cleaning up any memory, ensure irq & 2569 * bottom half contexts will not be re-entered 2570 */ 2571 hif_disable_isr(&scn->osc); 2572 hif_destroy_oom_work(scn); 2573 scn->hif_init_done = false; 2574 2575 /* 2576 * At this point, asynchronous threads are stopped; 2577 * the Target should neither DMA nor interrupt, and Host code may 2578 * not initiate anything more. So we just need to clean 2579 * up Host-side state.
2580 */ 2581 2582 if (scn->athdiag_procfs_inited) { 2583 athdiag_procfs_remove(); 2584 scn->athdiag_procfs_inited = false; 2585 } 2586 2587 hif_buffer_cleanup(hif_state); 2588 2589 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2590 struct HIF_CE_pipe_info *pipe_info; 2591 struct CE_attr attr; 2592 struct CE_handle *ce_diag = hif_state->ce_diag; 2593 2594 pipe_info = &hif_state->pipe_info[pipe_num]; 2595 if (pipe_info->ce_hdl) { 2596 if (pipe_info->ce_hdl != ce_diag) { 2597 attr = hif_state->host_ce_config[pipe_num]; 2598 if (attr.src_nentries) 2599 qdf_spinlock_destroy(&pipe_info-> 2600 completion_freeq_lock); 2601 } 2602 ce_fini(pipe_info->ce_hdl); 2603 pipe_info->ce_hdl = NULL; 2604 pipe_info->buf_sz = 0; 2605 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2606 } 2607 } 2608 2609 if (hif_state->sleep_timer_init) { 2610 qdf_timer_stop(&hif_state->sleep_timer); 2611 qdf_timer_free(&hif_state->sleep_timer); 2612 hif_state->sleep_timer_init = false; 2613 } 2614 2615 hif_state->started = false; 2616 } 2617 2618 #ifdef QCN7605_SUPPORT 2619 static inline 2620 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg 2621 **target_shadow_reg_cfg_ret, 2622 uint32_t *shadow_cfg_sz_ret) 2623 { 2624 if (target_shadow_reg_cfg_ret) 2625 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605; 2626 if (shadow_cfg_sz_ret) 2627 *shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605); 2628 } 2629 #else 2630 static inline 2631 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg 2632 **target_shadow_reg_cfg_ret, 2633 uint32_t *shadow_cfg_sz_ret) 2634 { 2635 HIF_ERROR("QCN7605 not supported"); 2636 } 2637 #endif 2638 2639 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 2640 struct shadow_reg_cfg 2641 **target_shadow_reg_cfg_ret, 2642 uint32_t *shadow_cfg_sz_ret) 2643 { 2644 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2645 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 2646 2647 switch (tgt_info->target_type) { 2648 case TARGET_TYPE_QCN7605: 2649 hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret, 2650 shadow_cfg_sz_ret); 2651 break; 2652 default: 2653 if (target_shadow_reg_cfg_ret) 2654 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 2655 if (shadow_cfg_sz_ret) 2656 *shadow_cfg_sz_ret = shadow_cfg_sz; 2657 } 2658 } 2659 2660 /** 2661 * hif_get_target_ce_config() - get copy engine configuration 2662 * @target_ce_config_ret: basic copy engine configuration 2663 * @target_ce_config_sz_ret: size of the basic configuration in bytes 2664 * @target_service_to_ce_map_ret: service mapping for the copy engines 2665 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 2666 * @target_shadow_reg_cfg_ret: shadow register configuration 2667 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 2668 * 2669 * providing accessor to these values outside of this file. 2670 * currently these are stored in static pointers to const sections. 2671 * there are multiple configurations that are selected from at compile time. 2672 * Runtime selection would need to consider mode, target type and bus type. 2673 * 2674 * Return: return by parameter. 
2675 */ 2676 void hif_get_target_ce_config(struct hif_softc *scn, 2677 struct CE_pipe_config **target_ce_config_ret, 2678 uint32_t *target_ce_config_sz_ret, 2679 struct service_to_pipe **target_service_to_ce_map_ret, 2680 uint32_t *target_service_to_ce_map_sz_ret, 2681 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 2682 uint32_t *shadow_cfg_sz_ret) 2683 { 2684 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2685 2686 *target_ce_config_ret = hif_state->target_ce_config; 2687 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 2688 2689 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 2690 target_service_to_ce_map_sz_ret); 2691 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, 2692 shadow_cfg_sz_ret); 2693 } 2694 2695 #ifdef CONFIG_SHADOW_V2 2696 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2697 { 2698 int i; 2699 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2700 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); 2701 2702 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 2703 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2704 "%s: i %d, val %x", __func__, i, 2705 cfg->shadow_reg_v2_cfg[i].addr); 2706 } 2707 } 2708 2709 #else 2710 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2711 { 2712 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2713 "%s: CONFIG_SHADOW_V2 not defined", __func__); 2714 } 2715 #endif 2716 2717 /** 2718 * hif_wlan_enable(): call the platform driver to enable wlan 2719 * @scn: HIF Context 2720 * 2721 * This function passes the con_mode and CE configuration to 2722 * platform driver to enable wlan. 2723 * 2724 * Return: linux error code 2725 */ 2726 int hif_wlan_enable(struct hif_softc *scn) 2727 { 2728 struct pld_wlan_enable_cfg cfg; 2729 enum pld_driver_mode mode; 2730 uint32_t con_mode = hif_get_conparam(scn); 2731 2732 hif_get_target_ce_config(scn, 2733 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 2734 &cfg.num_ce_tgt_cfg, 2735 (struct service_to_pipe **)&cfg.ce_svc_cfg, 2736 &cfg.num_ce_svc_pipe_cfg, 2737 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 2738 &cfg.num_shadow_reg_cfg); 2739 2740 /* translate from structure size to array size */ 2741 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 2742 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 2743 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 2744 2745 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, 2746 &cfg.num_shadow_reg_v2_cfg); 2747 2748 hif_print_hal_shadow_register_cfg(&cfg); 2749 2750 if (QDF_GLOBAL_FTM_MODE == con_mode) 2751 mode = PLD_FTM; 2752 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 2753 mode = PLD_COLDBOOT_CALIBRATION; 2754 else if (QDF_IS_EPPING_ENABLED(con_mode)) 2755 mode = PLD_EPPING; 2756 else 2757 mode = PLD_MISSION; 2758 2759 if (BYPASS_QMI) 2760 return 0; 2761 else 2762 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, 2763 mode, QWLAN_VERSIONSTR); 2764 } 2765 2766 #ifdef WLAN_FEATURE_EPPING 2767 2768 #define CE_EPPING_USES_IRQ true 2769 2770 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) 2771 { 2772 if (CE_EPPING_USES_IRQ) 2773 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 2774 else 2775 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 2776 hif_state->target_ce_config = target_ce_config_wlan_epping; 2777 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 2778 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 2779 shadow_cfg_sz = 
sizeof(target_shadow_reg_cfg_epping); 2780 } 2781 #endif 2782 2783 #ifdef QCN7605_SUPPORT 2784 static inline 2785 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 2786 struct HIF_CE_state *hif_state) 2787 { 2788 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 2789 hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 2790 hif_state->target_ce_config_sz = 2791 sizeof(target_ce_config_wlan_qcn7605); 2792 scn->ce_count = QCN7605_CE_COUNT; 2793 } 2794 #else 2795 static inline 2796 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 2797 struct HIF_CE_state *hif_state) 2798 { 2799 HIF_ERROR("QCN7605 not supported"); 2800 } 2801 #endif 2802 2803 #ifdef CE_SVC_CMN_INIT 2804 #ifdef QCA_WIFI_SUPPORT_SRNG 2805 static inline void hif_ce_service_init(void) 2806 { 2807 ce_service_srng_init(); 2808 } 2809 #else 2810 static inline void hif_ce_service_init(void) 2811 { 2812 ce_service_legacy_init(); 2813 } 2814 #endif 2815 #else 2816 static inline void hif_ce_service_init(void) 2817 { 2818 } 2819 #endif 2820 2821 2822 /** 2823 * hif_ce_prepare_config() - load the correct static tables. 2824 * @scn: hif context 2825 * 2826 * Epping uses different static attribute tables than mission mode. 2827 */ 2828 void hif_ce_prepare_config(struct hif_softc *scn) 2829 { 2830 uint32_t mode = hif_get_conparam(scn); 2831 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2832 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 2833 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2834 2835 hif_ce_service_init(); 2836 hif_state->ce_services = ce_services_attach(scn); 2837 2838 scn->ce_count = HOST_CE_COUNT; 2839 /* if epping is enabled we need to use the epping configuration. */ 2840 if (QDF_IS_EPPING_ENABLED(mode)) { 2841 hif_ce_prepare_epping_config(hif_state); 2842 } 2843 2844 switch (tgt_info->target_type) { 2845 default: 2846 hif_state->host_ce_config = host_ce_config_wlan; 2847 hif_state->target_ce_config = target_ce_config_wlan; 2848 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 2849 break; 2850 case TARGET_TYPE_QCN7605: 2851 hif_set_ce_config_qcn7605(scn, hif_state); 2852 break; 2853 case TARGET_TYPE_AR900B: 2854 case TARGET_TYPE_QCA9984: 2855 case TARGET_TYPE_IPQ4019: 2856 case TARGET_TYPE_QCA9888: 2857 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 2858 hif_state->host_ce_config = 2859 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 2860 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2861 hif_state->host_ce_config = 2862 host_lowdesc_ce_cfg_wlan_ar900b; 2863 } else { 2864 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 2865 } 2866 2867 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 2868 hif_state->target_ce_config_sz = 2869 sizeof(target_ce_config_wlan_ar900b); 2870 2871 break; 2872 2873 case TARGET_TYPE_AR9888: 2874 case TARGET_TYPE_AR9888V2: 2875 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2876 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 2877 } else { 2878 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 2879 } 2880 2881 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 2882 hif_state->target_ce_config_sz = 2883 sizeof(target_ce_config_wlan_ar9888); 2884 2885 break; 2886 2887 case TARGET_TYPE_QCA8074: 2888 case TARGET_TYPE_QCA8074V2: 2889 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 2890 hif_state->host_ce_config = 2891 host_ce_config_wlan_qca8074_pci; 2892 hif_state->target_ce_config = 2893 target_ce_config_wlan_qca8074_pci; 2894 
hif_state->target_ce_config_sz = 2895 sizeof(target_ce_config_wlan_qca8074_pci); 2896 } else { 2897 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 2898 hif_state->target_ce_config = 2899 target_ce_config_wlan_qca8074; 2900 hif_state->target_ce_config_sz = 2901 sizeof(target_ce_config_wlan_qca8074); 2902 } 2903 break; 2904 case TARGET_TYPE_QCA6290: 2905 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 2906 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 2907 hif_state->target_ce_config_sz = 2908 sizeof(target_ce_config_wlan_qca6290); 2909 2910 scn->ce_count = QCA_6290_CE_COUNT; 2911 break; 2912 case TARGET_TYPE_QCA6390: 2913 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 2914 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 2915 hif_state->target_ce_config_sz = 2916 sizeof(target_ce_config_wlan_qca6390); 2917 2918 scn->ce_count = QCA_6390_CE_COUNT; 2919 break; 2920 } 2921 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 2922 } 2923 2924 /** 2925 * hif_ce_open() - do ce specific allocations 2926 * @hif_sc: pointer to hif context 2927 * 2928 * return: 0 for success or QDF_STATUS_E_NOMEM 2929 */ 2930 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 2931 { 2932 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2933 2934 qdf_spinlock_create(&hif_state->irq_reg_lock); 2935 qdf_spinlock_create(&hif_state->keep_awake_lock); 2936 return QDF_STATUS_SUCCESS; 2937 } 2938 2939 /** 2940 * hif_ce_close() - do ce specific free 2941 * @hif_sc: pointer to hif context 2942 */ 2943 void hif_ce_close(struct hif_softc *hif_sc) 2944 { 2945 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2946 2947 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 2948 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 2949 } 2950 2951 /** 2952 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 2953 * @hif_sc: hif context 2954 * 2955 * uses state variables to support cleaning up when hif_config_ce fails. 2956 */ 2957 void hif_unconfig_ce(struct hif_softc *hif_sc) 2958 { 2959 int pipe_num; 2960 struct HIF_CE_pipe_info *pipe_info; 2961 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2962 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 2963 2964 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 2965 pipe_info = &hif_state->pipe_info[pipe_num]; 2966 if (pipe_info->ce_hdl) { 2967 ce_unregister_irq(hif_state, (1 << pipe_num)); 2968 } 2969 } 2970 deinit_tasklet_workers(hif_hdl); 2971 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 2972 pipe_info = &hif_state->pipe_info[pipe_num]; 2973 if (pipe_info->ce_hdl) { 2974 ce_fini(pipe_info->ce_hdl); 2975 pipe_info->ce_hdl = NULL; 2976 pipe_info->buf_sz = 0; 2977 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2978 } 2979 } 2980 if (hif_sc->athdiag_procfs_inited) { 2981 athdiag_procfs_remove(); 2982 hif_sc->athdiag_procfs_inited = false; 2983 } 2984 } 2985 2986 #ifdef CONFIG_BYPASS_QMI 2987 #define FW_SHARED_MEM (2 * 1024 * 1024) 2988 2989 /** 2990 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 2991 * @scn: pointer to HIF structure 2992 * 2993 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
2994 * 2995 * Return: void 2996 */ 2997 static void hif_post_static_buf_to_target(struct hif_softc *scn) 2998 { 2999 void *target_va; 3000 phys_addr_t target_pa; 3001 3002 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 3003 FW_SHARED_MEM, &target_pa); 3004 if (!target_va) { 3005 HIF_TRACE("Memory allocation failed, could not post target buf"); 3006 return; 3007 } 3008 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3009 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); 3010 } 3011 #else 3012 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 3013 { 3014 } 3015 #endif 3016 3017 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 3018 bool wait_for_it) 3019 { 3020 /* todo */ 3021 return 0; 3022 } 3023 3024 /** 3025 * hif_config_ce() - configure copy engines 3026 * @scn: hif context 3027 * 3028 * Prepares the fw, copy engine hardware and host sw according 3029 * to the attributes selected by hif_ce_prepare_config. 3030 * 3031 * Also calls athdiag_procfs_init. 3032 * 3033 * Return: 0 for success, nonzero for failure. 3034 */ 3035 int hif_config_ce(struct hif_softc *scn) 3036 { 3037 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3038 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3039 struct HIF_CE_pipe_info *pipe_info; 3040 int pipe_num; 3041 struct CE_state *ce_state = NULL; 3042 3043 #ifdef ADRASTEA_SHADOW_REGISTERS 3044 int i; 3045 #endif 3046 QDF_STATUS rv = QDF_STATUS_SUCCESS; 3047 3048 scn->notice_send = true; 3049 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 3050 3051 hif_post_static_buf_to_target(scn); 3052 3053 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 3054 3055 hif_config_rri_on_ddr(scn); 3056 3057 if (ce_srng_based(scn)) 3058 scn->bus_ops.hif_target_sleep_state_adjust = 3059 &hif_srng_sleep_state_adjust; 3060 3061 /* Initialise the CE debug history sysfs interface inputs ce_id and 3062 * index.
Disable data storing. 3063 */ 3064 reset_ce_debug_history(scn); 3065 3066 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3067 struct CE_attr *attr; 3068 3069 pipe_info = &hif_state->pipe_info[pipe_num]; 3070 pipe_info->pipe_num = pipe_num; 3071 pipe_info->HIF_CE_state = hif_state; 3072 attr = &hif_state->host_ce_config[pipe_num]; 3073 3074 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 3075 ce_state = scn->ce_id_to_state[pipe_num]; 3076 if (!ce_state) { 3077 A_TARGET_ACCESS_UNLIKELY(scn); 3078 goto err; 3079 } 3080 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 3081 QDF_ASSERT(pipe_info->ce_hdl != NULL); 3082 if (pipe_info->ce_hdl == NULL) { 3083 rv = QDF_STATUS_E_FAILURE; 3084 A_TARGET_ACCESS_UNLIKELY(scn); 3085 goto err; 3086 } 3087 3088 ce_state->lro_data = qdf_lro_init(); 3089 3090 if (attr->flags & CE_ATTR_DIAG) { 3091 /* Reserve the ultimate CE for 3092 * Diagnostic Window support 3093 */ 3094 hif_state->ce_diag = pipe_info->ce_hdl; 3095 continue; 3096 } 3097 3098 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3099 (ce_state->htt_rx_data)) 3100 continue; 3101 3102 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); 3103 if (attr->dest_nentries > 0) { 3104 atomic_set(&pipe_info->recv_bufs_needed, 3105 init_buffer_count(attr->dest_nentries - 1)); 3106 /* SRNG based CE has one entry less */ 3107 if (ce_srng_based(scn)) 3108 atomic_dec(&pipe_info->recv_bufs_needed); 3109 } else { 3110 atomic_set(&pipe_info->recv_bufs_needed, 0); 3111 } 3112 ce_tasklet_init(hif_state, (1 << pipe_num)); 3113 ce_register_irq(hif_state, (1 << pipe_num)); 3114 } 3115 3116 if (athdiag_procfs_init(scn) != 0) { 3117 A_TARGET_ACCESS_UNLIKELY(scn); 3118 goto err; 3119 } 3120 scn->athdiag_procfs_inited = true; 3121 3122 HIF_DBG("%s: ce_init done", __func__); 3123 3124 init_tasklet_workers(hif_hdl); 3125 3126 HIF_DBG("%s: X, ret = %d", __func__, rv); 3127 3128 #ifdef ADRASTEA_SHADOW_REGISTERS 3129 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); 3130 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 3131 HIF_DBG("%s Shadow Register%d is mapped to address %x", 3132 __func__, i, 3133 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 3134 } 3135 #endif 3136 3137 return rv != QDF_STATUS_SUCCESS; 3138 3139 err: 3140 /* Failure, so clean up */ 3141 hif_unconfig_ce(scn); 3142 HIF_TRACE("%s: X, ret = %d", __func__, rv); 3143 return -EINVAL; /* nonzero return indicates failure */ 3144 } 3145 3146 #ifdef WLAN_FEATURE_FASTPATH 3147 /** 3148 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler 3149 * @handler: Callback function 3150 * @context: handle for callback function 3151 * 3152 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE 3153 */ 3154 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx, 3155 fastpath_msg_handler handler, 3156 void *context) 3157 { 3158 struct CE_state *ce_state; 3159 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 3160 int i; 3161 3162 if (!scn) { 3163 HIF_ERROR("%s: scn is NULL", __func__); 3164 QDF_ASSERT(0); 3165 return QDF_STATUS_E_FAILURE; 3166 } 3167 3168 if (!scn->fastpath_mode_on) { 3169 HIF_WARN("%s: Fastpath mode disabled", __func__); 3170 return QDF_STATUS_E_FAILURE; 3171 } 3172 3173 for (i = 0; i < scn->ce_count; i++) { 3174 ce_state = scn->ce_id_to_state[i]; 3175 if (ce_state->htt_rx_data) { 3176 ce_state->fastpath_handler = handler; 3177 ce_state->context = context; 3178 } 3179 } 3180 3181 return QDF_STATUS_SUCCESS; 3182 } 3183 qdf_export_symbol(hif_ce_fastpath_cb_register); 3184 #endif 3185 3186
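/*
 * Illustrative usage sketch (not part of the driver): a datapath client
 * such as HTT would register its fastpath receive handler once HIF is
 * up. The handler signature shown is assumed to match the
 * fastpath_msg_handler typedef; "my_htt_ctx" and "my_t2h_msg_handler"
 * are hypothetical names.
 *
 *	static void my_t2h_msg_handler(void *context,
 *				       qdf_nbuf_t *cmpl_msdus,
 *				       uint32_t num_cmpls)
 *	{
 *		... consume the CE message buffers in place, then recycle ...
 *	}
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_t2h_msg_handler,
 *					my_htt_ctx) != QDF_STATUS_SUCCESS)
 *		... fastpath disabled; fall back to the regular rx path ...
 */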
#ifdef IPA_OFFLOAD 3187 /** 3188 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 3189 * @scn: bus context 3190 * @ce_sr: copyengine source ring resource info 3191 * @ce_sr_ring_size: copyengine source ring size 3192 * @ce_reg_paddr: copyengine register physical address 3193 * 3194 * When the IPA microcontroller data path offload feature is enabled, 3195 * HIF should release copy engine resource information to the IPA UC, 3196 * which will then access the hardware resources using that information. 3197 * 3198 * Return: None 3199 */ 3200 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 3201 qdf_shared_mem_t **ce_sr, 3202 uint32_t *ce_sr_ring_size, 3203 qdf_dma_addr_t *ce_reg_paddr) 3204 { 3205 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3206 struct HIF_CE_pipe_info *pipe_info = 3207 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 3208 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3209 3210 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 3211 ce_reg_paddr); 3212 } 3213 #endif /* IPA_OFFLOAD */ 3214 3215 3216 #ifdef ADRASTEA_SHADOW_REGISTERS 3217 3218 /* 3219 * Current shadow register config 3220 * 3221 * ----------------------------------------------------------- 3222 * Shadow Register | CE | src/dst write index 3223 * ----------------------------------------------------------- 3224 * 0 | 0 | src 3225 * 1 No Config - Doesn't point to anything 3226 * 2 No Config - Doesn't point to anything 3227 * 3 | 3 | src 3228 * 4 | 4 | src 3229 * 5 | 5 | src 3230 * 6 No Config - Doesn't point to anything 3231 * 7 | 7 | src 3232 * 8 No Config - Doesn't point to anything 3233 * 9 No Config - Doesn't point to anything 3234 * 10 No Config - Doesn't point to anything 3235 * 11 No Config - Doesn't point to anything 3236 * ----------------------------------------------------------- 3237 * 12 No Config - Doesn't point to anything 3238 * 13 | 1 | dst 3239 * 14 | 2 | dst 3240 * 15 No Config - Doesn't point to anything 3241 * 16 No Config - Doesn't point to anything 3242 * 17 No Config - Doesn't point to anything 3243 * 18 No Config - Doesn't point to anything 3244 * 19 | 7 | dst 3245 * 20 | 8 | dst 3246 * 21 No Config - Doesn't point to anything 3247 * 22 No Config - Doesn't point to anything 3248 * 23 No Config - Doesn't point to anything 3249 * ----------------------------------------------------------- 3250 * 3251 * 3252 * ToDo - Move the shadow register config to the following layout in the future. 3253 * This helps free up a block of shadow registers towards the end.
3254 * Can be used for other purposes 3255 * 3256 * ----------------------------------------------------------- 3257 * Shadow Register | CE | src/dst write index 3258 * ----------------------------------------------------------- 3259 * 0 | 0 | src 3260 * 1 | 3 | src 3261 * 2 | 4 | src 3262 * 3 | 5 | src 3263 * 4 | 7 | src 3264 * ----------------------------------------------------------- 3265 * 5 | 1 | dst 3266 * 6 | 2 | dst 3267 * 7 | 7 | dst 3268 * 8 | 8 | dst 3269 * ----------------------------------------------------------- 3270 * 9 No Config - Doesn't point to anything 3271 * 12 No Config - Doesn't point to anything 3272 * 13 No Config - Doesn't point to anything 3273 * 14 No Config - Doesn't point to anything 3274 * 15 No Config - Doesn't point to anything 3275 * 16 No Config - Doesn't point to anything 3276 * 17 No Config - Doesn't point to anything 3277 * 18 No Config - Doesn't point to anything 3278 * 19 No Config - Doesn't point to anything 3279 * 20 No Config - Doesn't point to anything 3280 * 21 No Config - Doesn't point to anything 3281 * 22 No Config - Doesn't point to anything 3282 * 23 No Config - Doesn't point to anything 3283 * ----------------------------------------------------------- 3284 */ 3285 #ifndef QCN7605_SUPPORT 3286 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3287 { 3288 u32 addr = 0; 3289 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3290 3291 switch (ce) { 3292 case 0: 3293 addr = SHADOW_VALUE0; 3294 break; 3295 case 3: 3296 addr = SHADOW_VALUE3; 3297 break; 3298 case 4: 3299 addr = SHADOW_VALUE4; 3300 break; 3301 case 5: 3302 addr = SHADOW_VALUE5; 3303 break; 3304 case 7: 3305 addr = SHADOW_VALUE7; 3306 break; 3307 default: 3308 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3309 QDF_ASSERT(0); 3310 } 3311 return addr; 3312 3313 } 3314 3315 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3316 { 3317 u32 addr = 0; 3318 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3319 3320 switch (ce) { 3321 case 1: 3322 addr = SHADOW_VALUE13; 3323 break; 3324 case 2: 3325 addr = SHADOW_VALUE14; 3326 break; 3327 case 5: 3328 addr = SHADOW_VALUE17; 3329 break; 3330 case 7: 3331 addr = SHADOW_VALUE19; 3332 break; 3333 case 8: 3334 addr = SHADOW_VALUE20; 3335 break; 3336 case 9: 3337 addr = SHADOW_VALUE21; 3338 break; 3339 case 10: 3340 addr = SHADOW_VALUE22; 3341 break; 3342 case 11: 3343 addr = SHADOW_VALUE23; 3344 break; 3345 default: 3346 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3347 QDF_ASSERT(0); 3348 } 3349 3350 return addr; 3351 3352 } 3353 #else 3354 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3355 { 3356 u32 addr = 0; 3357 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3358 3359 switch (ce) { 3360 case 0: 3361 addr = SHADOW_VALUE0; 3362 break; 3363 case 4: 3364 addr = SHADOW_VALUE4; 3365 break; 3366 case 5: 3367 addr = SHADOW_VALUE5; 3368 break; 3369 default: 3370 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3371 QDF_ASSERT(0); 3372 } 3373 return addr; 3374 } 3375 3376 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3377 { 3378 u32 addr = 0; 3379 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3380 3381 switch (ce) { 3382 case 1: 3383 addr = SHADOW_VALUE13; 3384 break; 3385 case 2: 3386 addr = SHADOW_VALUE14; 3387 break; 3388 case 3: 3389 addr = SHADOW_VALUE15; 3390 break; 3391 case 5: 3392 addr = SHADOW_VALUE17; 3393 break; 3394 case 7: 3395 addr = SHADOW_VALUE19; 3396 break; 3397 case 8: 3398 addr = SHADOW_VALUE20; 3399 break; 3400 case 9: 3401 addr = SHADOW_VALUE21; 3402 break; 3403 case 10: 3404 addr = SHADOW_VALUE22; 3405 
break; 3406 case 11: 3407 addr = SHADOW_VALUE23; 3408 break; 3409 default: 3410 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3411 QDF_ASSERT(0); 3412 } 3413 3414 return addr; 3415 } 3416 #endif 3417 #endif 3418 3419 #if defined(FEATURE_LRO) 3420 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 3421 { 3422 struct CE_state *ce_state; 3423 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3424 3425 ce_state = scn->ce_id_to_state[ctx_id]; 3426 3427 return ce_state->lro_data; 3428 } 3429 #endif 3430 3431 /** 3432 * hif_map_service_to_pipe() - returns the ce ids pertaining to 3433 * this service 3434 * @scn: hif_softc pointer. 3435 * @svc_id: Service ID for which the mapping is needed. 3436 * @ul_pipe: address of the container in which ul pipe is returned. 3437 * @dl_pipe: address of the container in which dl pipe is returned. 3438 * @ul_is_polled: address of the container in which a bool 3439 * indicating if the UL CE for this service 3440 * is polled is returned. 3441 * @dl_is_polled: address of the container in which a bool 3442 * indicating if the DL CE for this service 3443 * is polled is returned. 3444 * 3445 * Return: Indicates whether the service has been found in the table. 3446 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 3447 * There will be warning logs if either leg has not been updated 3448 * because it missed the entry in the table (but this is not an err). 3449 */ 3450 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 3451 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 3452 int *dl_is_polled) 3453 { 3454 int status = QDF_STATUS_E_INVAL; 3455 unsigned int i; 3456 struct service_to_pipe element; 3457 struct service_to_pipe *tgt_svc_map_to_use; 3458 uint32_t sz_tgt_svc_map_to_use; 3459 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3460 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3461 bool dl_updated = false; 3462 bool ul_updated = false; 3463 3464 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 3465 &sz_tgt_svc_map_to_use); 3466 3467 *dl_is_polled = 0; /* polling for received messages not supported */ 3468 3469 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 3470 3471 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 3472 if (element.service_id == svc_id) { 3473 if (element.pipedir == PIPEDIR_OUT) { 3474 *ul_pipe = element.pipenum; 3475 *ul_is_polled = 3476 (hif_state->host_ce_config[*ul_pipe].flags & 3477 CE_ATTR_DISABLE_INTR) != 0; 3478 ul_updated = true; 3479 } else if (element.pipedir == PIPEDIR_IN) { 3480 *dl_pipe = element.pipenum; 3481 dl_updated = true; 3482 } 3483 status = QDF_STATUS_SUCCESS; 3484 } 3485 } 3486 if (ul_updated == false) 3487 HIF_DBG("ul pipe is NOT updated for service %d", svc_id); 3488 if (dl_updated == false) 3489 HIF_DBG("dl pipe is NOT updated for service %d", svc_id); 3490 3491 return status; 3492 } 3493 3494 #ifdef SHADOW_REG_DEBUG 3495 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 3496 uint32_t CE_ctrl_addr) 3497 { 3498 uint32_t read_from_hw, srri_from_ddr = 0; 3499 3500 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 3501 3502 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3503 3504 if (read_from_hw != srri_from_ddr) { 3505 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3506 __func__, srri_from_ddr, read_from_hw, 3507 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3508 QDF_ASSERT(0); 3509 } 3510 
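/*
 * The DDR-resident index matched the value read back from the
 * hardware register, so the cheaper DDR copy can be returned.
 */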
return srri_from_ddr; 3511 } 3512 3513 3514 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 3515 uint32_t CE_ctrl_addr) 3516 { 3517 uint32_t read_from_hw, drri_from_ddr = 0; 3518 3519 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 3520 3521 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3522 3523 if (read_from_hw != drri_from_ddr) { 3524 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3525 drri_from_ddr, read_from_hw, 3526 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3527 QDF_ASSERT(0); 3528 } 3529 return drri_from_ddr; 3530 } 3531 3532 #endif 3533 3534 #ifdef ADRASTEA_RRI_ON_DDR 3535 /** 3536 * hif_get_src_ring_read_index(): Called to get the SRRI 3537 * 3538 * @scn: hif_softc pointer 3539 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3540 * 3541 * This function returns the SRRI to the caller. For CEs that 3542 * don't have interrupts enabled, we look at the DDR based SRRI 3543 * 3544 * Return: SRRI 3545 */ 3546 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, 3547 uint32_t CE_ctrl_addr) 3548 { 3549 struct CE_attr attr; 3550 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3551 3552 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3553 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3554 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3555 } else { 3556 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3557 return A_TARGET_READ(scn, 3558 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); 3559 else 3560 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, 3561 CE_ctrl_addr); 3562 } 3563 } 3564 3565 /** 3566 * hif_get_dst_ring_read_index(): Called to get the DRRI 3567 * 3568 * @scn: hif_softc pointer 3569 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3570 * 3571 * This function returns the DRRI to the caller. For CEs that 3572 * don't have interrupts enabled, we look at the DDR based DRRI 3573 * 3574 * Return: DRRI 3575 */ 3576 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, 3577 uint32_t CE_ctrl_addr) 3578 { 3579 struct CE_attr attr; 3580 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3581 3582 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 3583 3584 if (attr.flags & CE_ATTR_DISABLE_INTR) { 3585 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 3586 } else { 3587 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 3588 return A_TARGET_READ(scn, 3589 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); 3590 else 3591 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, 3592 CE_ctrl_addr); 3593 } 3594 } 3595 3596 /** 3597 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3598 * 3599 * @scn: hif_softc pointer 3600 * 3601 * This function allocates non-cached memory on DDR and sends 3602 * the physical address of this memory to the CE hardware. The 3603 * hardware updates the RRI on this particular location.
3604 * 3605 * Return: None 3606 */ 3607 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3608 { 3609 unsigned int i; 3610 qdf_dma_addr_t paddr_rri_on_ddr; 3611 uint32_t high_paddr, low_paddr; 3612 3613 scn->vaddr_rri_on_ddr = 3614 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 3615 scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)), 3616 &paddr_rri_on_ddr); 3617 3618 scn->paddr_rri_on_ddr = paddr_rri_on_ddr; 3619 low_paddr = BITS0_TO_31(paddr_rri_on_ddr); 3620 high_paddr = BITS32_TO_35(paddr_rri_on_ddr); 3621 3622 HIF_DBG("%s using srri and drri from DDR", __func__); 3623 3624 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); 3625 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); 3626 3627 for (i = 0; i < CE_COUNT; i++) 3628 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); 3629 3630 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t)); 3631 3632 } 3633 #else 3634 3635 /** 3636 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 3637 * 3638 * @scn: hif_softc pointer 3639 * 3640 * This is a dummy implementation for platforms that don't 3641 * support this functionality. 3642 * 3643 * Return: None 3644 */ 3645 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 3646 { 3647 } 3648 #endif 3649 3650 /** 3651 * hif_dump_ce_registers() - dump ce registers 3652 * @scn: hif_opaque_softc pointer. 3653 * 3654 * Output the copy engine registers 3655 * 3656 * Return: 0 for success or error code 3657 */ 3658 int hif_dump_ce_registers(struct hif_softc *scn) 3659 { 3660 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3661 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 3662 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 3663 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 3664 uint16_t i; 3665 QDF_STATUS status; 3666 3667 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 3668 if (scn->ce_id_to_state[i] == NULL) { 3669 HIF_DBG("CE%d not used.", i); 3670 continue; 3671 } 3672 3673 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 3674 (uint8_t *) &ce_reg_values[0], 3675 ce_reg_word_size * sizeof(uint32_t)); 3676 3677 if (status != QDF_STATUS_SUCCESS) { 3678 HIF_ERROR("Dumping CE register failed!"); 3679 return -EACCES; 3680 } 3681 HIF_ERROR("CE%d=>\n", i); 3682 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 3683 (uint8_t *) &ce_reg_values[0], 3684 ce_reg_word_size * sizeof(uint32_t)); 3685 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 3686 + SR_WR_INDEX_ADDRESS), 3687 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 3688 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 3689 + CURRENT_SRRI_ADDRESS), 3690 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 3691 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 3692 + DST_WR_INDEX_ADDRESS), 3693 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 3694 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 3695 + CURRENT_DRRI_ADDRESS), 3696 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 3697 qdf_print("---"); 3698 } 3699 return 0; 3700 } 3701 qdf_export_symbol(hif_dump_ce_registers); 3702 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 3703 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 3704 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 3705 { 3706 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3707 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3708 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 3709 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3710 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3711 struct CE_state 
/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_opaque_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
		if (scn->ce_id_to_state[i] == NULL) {
			HIF_DBG("CE%d not used.", i);
			continue;
		}

		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *)&ce_reg_values[0],
					   ce_reg_word_size * sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d=>\n", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *)&ce_reg_values[0],
				   ce_reg_word_size * sizeof(uint32_t));
		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
			  + SR_WR_INDEX_ADDRESS),
			  ce_reg_values[SR_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
			  + CURRENT_SRRI_ADDRESS),
			  ce_reg_values[CURRENT_SRRI_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
			  + DST_WR_INDEX_ADDRESS),
			  ce_reg_values[DST_WR_INDEX_ADDRESS / 4]);
		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
			  + CURRENT_DRRI_ADDRESS),
			  ce_reg_values[CURRENT_DRRI_ADDRESS / 4]);
		qdf_print("---");
	}
	return 0;
}
qdf_export_symbol(hif_dump_ce_registers);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
/**
 * hif_get_addl_pipe_info() - collect additional info about a pipe
 * @osc: opaque HIF context
 * @hif_info: structure to be filled with the pipe's ring state
 * @pipe: pipe (copy engine) id
 *
 * Copies the source and destination ring state of the given pipe,
 * along with the PCI memory base and CE control address, into
 * @hif_info.
 *
 * Return: @hif_info
 */
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;

	if (src_ring) {
		hif_info->ul_pipe.nentries = src_ring->nentries;
		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
		hif_info->ul_pipe.sw_index = src_ring->sw_index;
		hif_info->ul_pipe.write_index = src_ring->write_index;
		hif_info->ul_pipe.hw_index = src_ring->hw_index;
		hif_info->ul_pipe.base_addr_CE_space =
			src_ring->base_addr_CE_space;
		hif_info->ul_pipe.base_addr_owner_space =
			src_ring->base_addr_owner_space;
	}

	if (dest_ring) {
		hif_info->dl_pipe.nentries = dest_ring->nentries;
		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
		hif_info->dl_pipe.write_index = dest_ring->write_index;
		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
		hif_info->dl_pipe.base_addr_CE_space =
			dest_ring->base_addr_CE_space;
		hif_info->dl_pipe.base_addr_owner_space =
			dest_ring->base_addr_owner_space;
	}

	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
	hif_info->ctrl_addr = ce_state->ctrl_addr;

	return hif_info;
}
qdf_export_symbol(hif_get_addl_pipe_info);

/**
 * hif_set_nss_wifiol_mode() - set the NSS wifi offload mode
 * @osc: opaque HIF context
 * @mode: mode to set
 *
 * Return: 0
 */
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

/**
 * hif_set_attribute() - store the HIF attribute for this context
 * @osc: opaque HIF context
 * @hif_attrib: attribute to store
 *
 * Return: None
 */
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* Disable interrupts (only applicable for legacy copy engine currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);
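/*
 * Minimal sketch (hypothetical helper, not part of the HIF API):
 * generalizes the bracketing pattern used by hif_disable_interrupt()
 * above. Every direct target register access is wrapped in
 * Q_TARGET_ACCESS_BEGIN/END so the target stays awake for the duration
 * of the access. The CE_COPY_COMPLETE_INTR_ENABLE counterpart of the
 * disable macro is assumed to be available from ce_reg.h.
 */
static inline int hif_toggle_ce_intr_sketch(struct hif_softc *scn,
					    uint32_t ctrl_addr, bool enable)
{
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return -EIO;	/* target not accessible; skip the access */

	if (enable)
		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

	return Q_TARGET_ACCESS_END(scn) < 0 ? -EIO : 0;
}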
3805 * 3806 * only registered for legacy ce devices 3807 * 3808 * Return: status of handled irq 3809 */ 3810 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3811 { 3812 struct hif_softc *scn = arg; 3813 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3814 uint32_t fw_indicator_address, fw_indicator; 3815 3816 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 3817 return ATH_ISR_NOSCHED; 3818 3819 fw_indicator_address = hif_state->fw_indicator_address; 3820 /* For sudden unplug this will return ~0 */ 3821 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 3822 3823 if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { 3824 /* ACK: clear Target-side pending event */ 3825 A_TARGET_WRITE(scn, fw_indicator_address, 3826 fw_indicator & ~FW_IND_EVENT_PENDING); 3827 if (Q_TARGET_ACCESS_END(scn) < 0) 3828 return ATH_ISR_SCHED; 3829 3830 if (hif_state->started) { 3831 hif_fw_event_handler(hif_state); 3832 } else { 3833 /* 3834 * Probable Target failure before we're prepared 3835 * to handle it. Generally unexpected. 3836 * fw_indicator used as bitmap, and defined as below: 3837 * FW_IND_EVENT_PENDING 0x1 3838 * FW_IND_INITIALIZED 0x2 3839 * FW_IND_NEEDRECOVER 0x4 3840 */ 3841 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 3842 ("%s: Early firmware event indicated 0x%x\n", 3843 __func__, fw_indicator)); 3844 } 3845 } else { 3846 if (Q_TARGET_ACCESS_END(scn) < 0) 3847 return ATH_ISR_SCHED; 3848 } 3849 3850 return ATH_ISR_SCHED; 3851 } 3852 #else 3853 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3854 { 3855 return ATH_ISR_SCHED; 3856 } 3857 #endif /* #ifdef QCA_WIFI_3_0 */ 3858 3859 3860 /** 3861 * hif_wlan_disable(): call the platform driver to disable wlan 3862 * @scn: HIF Context 3863 * 3864 * This function passes the con_mode to platform driver to disable 3865 * wlan. 3866 * 3867 * Return: void 3868 */ 3869 void hif_wlan_disable(struct hif_softc *scn) 3870 { 3871 enum pld_driver_mode mode; 3872 uint32_t con_mode = hif_get_conparam(scn); 3873 3874 if (scn->target_status == TARGET_STATUS_RESET) 3875 return; 3876 3877 if (QDF_GLOBAL_FTM_MODE == con_mode) 3878 mode = PLD_FTM; 3879 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3880 mode = PLD_EPPING; 3881 else 3882 mode = PLD_MISSION; 3883 3884 pld_wlan_disable(scn->qdf_dev->dev, mode); 3885 } 3886 3887 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) 3888 { 3889 QDF_STATUS status; 3890 uint8_t ul_pipe, dl_pipe; 3891 int ul_is_polled, dl_is_polled; 3892 3893 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ 3894 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 3895 HTC_CTRL_RSVD_SVC, 3896 &ul_pipe, &dl_pipe, 3897 &ul_is_polled, &dl_is_polled); 3898 if (status) { 3899 HIF_ERROR("%s: failed to map pipe: %d", __func__, status); 3900 return qdf_status_to_os_return(status); 3901 } 3902 3903 *ce_id = dl_pipe; 3904 3905 return 0; 3906 } 3907