/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#ifndef CONFIG_WIN
#include "qwlan_version.h"
#endif
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef CONFIG_WIN
#if ENABLE_10_4_FW_HDR
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR */
#endif

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
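/*
 * Illustrative behavior of roundup_pwr2() for ring sizing (the inputs
 * below are examples only, not values from any target configuration):
 *
 *	roundup_pwr2(1)   -> 1    (1 & 0 == 0: already a power of 2)
 *	roundup_pwr2(3)   -> 4    (the scan starts at test_pwr2 = 4)
 *	roundup_pwr2(5)   -> 8    (4 is not > 5, 8 is: return 8)
 *	roundup_pwr2(512) -> 512  (already a power of 2)
 */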
#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering the BMI phase.
 * See:
 *	target_service_to_ce_map - Target-side mapping
 *	hif_map_service_to_pipe  - Host-side mapping
 *	target_ce_config         - Target-side configuration
 *	host_ce_config           - Host-side configuration
 * ==========================================================================
 * Purpose       | Service / Endpoint   | CE   | Dir  | Xfer Size | Xfer
 *               |                      |      |      |           | Frequency
 * ==========================================================================
 * tx descriptor | HTT_DATA (downlink)  | CE 0 | h->t | medium -  | very
 * download      |                      |      |      | O(100B)   | frequent
 *               |                      |      |      |           | and regular
 * --------------------------------------------------------------------------
 * rx indication | HTT_DATA (uplink)    | CE 1 | t->h | small -   | frequent
 * upload        |                      |      |      | O(10B)    | and regular
 * --------------------------------------------------------------------------
 * MSDU upload   | DATA_BK (uplink)     | CE 2 | t->h | large -   | rare
 * e.g. noise    |                      |      |      | O(1000B)  | (frequent
 * packets       |                      |      |      |           | during
 *               |                      |      |      |           | IP1.0
 *               |                      |      |      |           | testing)
 * --------------------------------------------------------------------------
 * MSDU download | DATA_BK (downlink)   | CE 3 | h->t | large -   | very rare
 * e.g.          |                      |      |      | O(1000B)  | (frequent
 * misdirected   |                      |      |      |           | during
 * EAPOL packets |                      |      |      |           | IP1.0
 *               |                      |      |      |           | testing)
 * --------------------------------------------------------------------------
 * n/a           | DATA_BE, DATA_VI,    | CE 2 | t->h |           | never(?)
 *               | DATA_VO (uplink)     |      |      |           |
 * --------------------------------------------------------------------------
 * n/a           | DATA_BE, DATA_VI,    | CE 3 | h->t |           | never(?)
 *               | DATA_VO (downlink)   |      |      |           |
 * --------------------------------------------------------------------------
 * WMI events    | WMI_CONTROL (uplink) | CE 4 | t->h | medium -  | infrequent
 *               |                      |      |      | O(100B)   |
 * --------------------------------------------------------------------------
 * WMI messages  | WMI_CONTROL          | CE 5 | h->t | medium -  | infrequent
 *               | (downlink)           |      |      | O(100B)   |
 * --------------------------------------------------------------------------
 * n/a           | HTC_CTRL_RSVD,       | CE 1 | t->h |           | never(?)
 *               | HTC_RAW_STREAMS      |      |      |           |
 *               | (uplink)             |      |      |           |
 * --------------------------------------------------------------------------
 * n/a           | HTC_CTRL_RSVD,       | CE 0 | h->t |           | never(?)
 *               | HTC_RAW_STREAMS      |      |      |           |
 *               | (downlink)           |      |      |           |
 * --------------------------------------------------------------------------
 * diag          | none (raw CE)        | CE 7 | t<>h | 4         | Diag Window
 *               |                      |      |      |           | infrequent
 * ==========================================================================
 */
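/*
 * Reading the table above against the maps that follow: each row
 * corresponds to an entry in a target_service_to_ce_map_* array. For
 * example, the "WMI messages" row (WMI_CONTROL downlink on CE 5) now
 * appears in target_service_to_ce_map_wlan below as
 *
 *	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
 *
 * i.e. host->target WMI control traffic currently rides CE 3, which is
 * exactly the kind of drift the NOTE above warns about.
 */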
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};
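/*
 * A minimal sketch (illustration only, not compiled into the driver) of
 * how a table like this is consumed. The helper name svc_to_pipe() is
 * hypothetical; the field names come from struct service_to_pipe as used
 * by ce_mark_datapath() and hif_map_service_to_pipe() in this file.
 *
 *	static int svc_to_pipe(struct service_to_pipe *map, size_t map_sz,
 *			       uint32_t svc_id, uint32_t dir)
 *	{
 *		size_t i, n = map_sz / sizeof(struct service_to_pipe);
 *
 *		for (i = 0; i < n; i++)
 *			if (map[i].service_id == svc_id &&
 *			    map[i].pipedir == dir)
 *				return map[i].pipenum;
 *		return -1;	// no mapping for this service/direction
 *	}
 *
 * With the table above, svc_to_pipe(target_service_to_ce_map_wlan,
 * sizeof(target_service_to_ce_map_wlan), WMI_CONTROL_SVC, PIPEDIR_OUT)
 * would return 3.
 */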
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#if (defined(QCA_WIFI_QCA6290))
#ifdef CONFIG_WIN
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif
static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},                             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
		sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
			struct service_to_pipe **tgt_svc_map_to_use,
			uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks ce_state->htt_rx_data/htt_tx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 * Sets the htt_rx_data or htt_tx_data attribute of the state structure if
 * the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}
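/*
 * Worked example of ce_mark_datapath(), using target_service_to_ce_map_wlan
 * above (illustration only): HTT_DATA_MSG_SVC is listed with pipe 4 for
 * PIPEDIR_OUT and pipe 1 for PIPEDIR_IN, so:
 *
 *	ce_mark_datapath(CE4_state);	// sets htt_tx_data = true, returns true
 *	ce_mark_datapath(CE1_state);	// sets htt_rx_data = true, returns true
 *	ce_mark_datapath(CE0_state);	// no HTT DATA service: returns false
 *
 * CE4_state/CE1_state/CE0_state stand for the CE_state of the respective
 * copy engines; ce_init() below performs this marking for every CE.
 */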
/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM otherwise
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 * Checks the target type against the list of SRNG-based targets.
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6018:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}
#else /* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */
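/*
 * A minimal sketch of how a CE service implementation hooks into
 * ce_attach_register[] above. The function names used here (my_ce_attach,
 * my_ce_service_init) are hypothetical; only ce_service_register_module()
 * and the CE_SVC_* indices come from this file. ce_services_attach() then
 * picks the right entry at attach time based on ce_srng_based().
 *
 *	static struct ce_ops my_ce_ops = {
 *		// fill in the ce_ops callbacks for this service type
 *	};
 *
 *	static struct ce_ops *my_ce_attach(void)
 *	{
 *		return &my_ce_ops;
 *	}
 *
 *	void my_ce_service_init(void)
 *	{
 *		ce_service_register_module(CE_SVC_LEGACY, my_ce_attach);
 *	}
 */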
static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
					uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
			  __func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Correctly initialize memory to 0 to
	 * prevent garbage data from crashing the system
	 * when firmware is downloaded
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
		     nentries * desc_size +
		     CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}
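/*
 * The alignment fixup in ce_alloc_ring_state() above is the standard
 * round-up-to-boundary idiom. A worked example, assuming a hypothetical
 * CE_DESC_RING_ALIGN of 8 and an unaligned DMA base of 0x1004:
 *
 *	(0x1004 + 8 - 1) & ~(8 - 1)  ==  0x100b & ~0x7  ==  0x1008
 *
 * Allocating nentries * desc_size + CE_DESC_RING_ALIGN bytes up front
 * guarantees the aligned base still has room for all nentries descriptors.
 */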
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			 uint32_t ce_id, struct CE_ring_state *ring,
			 struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
						     ring, attr);
}

int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
						  write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
				RESUME_WRITE_INDEX_UPDATE,
				NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#if HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	ce_hist->hist_ev[ce_id] = NULL;
}

#elif HIF_CE_DEBUG_DATA_BUF /* WIN */

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else /* Disabled */

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}
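/*
 * How the polling pieces fit together for a CE created with
 * CE_ATTR_ENABLE_POLL: ce_init() below initializes and arms poll_timer,
 * ce_enable_polling() sets timer_inited, and ce_poll_timeout() then
 * services the engine and re-arms itself every CE_POLL_TIMEOUT ms until
 * ce_disable_polling() clears timer_inited. A condensed view of that
 * cycle (illustration only; the real code uses a qdf_timer, not a loop):
 *
 *	while (CE_state->timer_inited) {
 *		ce_per_engine_service(CE_state->scn, CE_state->id);
 *		// wait CE_POLL_TIMEOUT ms (qdf_timer_mod re-arm)
 *	}
 */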
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
struct CE_handle *ce_init(struct hif_softc *scn,
			  unsigned int CE_id, struct CE_attr *attr)
{
	struct CE_state *CE_state;
	uint32_t ctrl_addr;
	unsigned int nentries;
	bool malloc_CE_state = false;
	bool malloc_src_ring = false;
	int status;

	QDF_ASSERT(CE_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(CE_id);
	CE_state = scn->ce_id_to_state[CE_id];

	if (!CE_state) {
		CE_state =
		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
		if (!CE_state) {
			HIF_ERROR("%s: CE_state has no mem", __func__);
			return NULL;
		}
		malloc_CE_state = true;
		qdf_spinlock_create(&CE_state->ce_index_lock);

		CE_state->id = CE_id;
		CE_state->ctrl_addr = ctrl_addr;
		CE_state->state = CE_RUNNING;
		CE_state->attr_flags = attr->flags;
	}
	CE_state->scn = scn;
	CE_state->service = ce_engine_service_reg;

	qdf_atomic_init(&CE_state->rx_pending);
	if (attr == NULL) {
		/* Already initialized; caller wants the handle */
		return (struct CE_handle *)CE_state;
	}

	if (CE_state->src_sz_max)
		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
	else
		CE_state->src_sz_max = attr->src_sz_max;

	ce_init_ce_desc_event_log(scn, CE_id,
				  attr->src_nentries + attr->dest_nentries);

	/* source ring setup */
	nentries = attr->src_nentries;
	if (nentries) {
		struct CE_ring_state *src_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->src_ring) {
			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
		} else {
			src_ring = CE_state->src_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_SRC,
						    nentries);
			if (!src_ring) {
				/* cannot allocate src ring. If the
				 * CE_state is allocated locally free
				 * CE_state and return error.
				 */
				HIF_ERROR("%s: src ring has no mem", __func__);
				if (malloc_CE_state) {
					/* allocated CE_state locally */
					qdf_mem_free(CE_state);
					malloc_CE_state = false;
				}
				return NULL;
			}
			/* we can allocate src ring. Mark that the src ring is
			 * allocated locally
			 */
			malloc_src_ring = true;

			/*
			 * Also allocate a shadow src ring in
			 * regular mem to use for faster access.
			 */
			src_ring->shadow_base_unaligned =
				qdf_mem_malloc(nentries *
					       sizeof(struct CE_src_desc) +
					       CE_DESC_RING_ALIGN);
			if (src_ring->shadow_base_unaligned == NULL) {
				HIF_ERROR("%s: src ring no shadow_base mem",
					  __func__);
				goto error_no_dma_mem;
			}
			src_ring->shadow_base = (struct CE_src_desc *)
				(((size_t) src_ring->shadow_base_unaligned +
				CE_DESC_RING_ALIGN - 1) &
				 ~(CE_DESC_RING_ALIGN - 1));

			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
					       src_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, src_ring,
						     "src_ring");
		}
	}
	/* destination ring setup */
	nentries = attr->dest_nentries;
	if (nentries) {
		struct CE_ring_state *dest_ring;

		nentries = roundup_pwr2(nentries);
		if (CE_state->dest_ring) {
			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
		} else {
			dest_ring = CE_state->dest_ring =
				ce_alloc_ring_state(CE_state,
						    CE_RING_DEST,
						    nentries);
			if (!dest_ring) {
				/* cannot allocate dst ring. If the CE_state
				 * or src ring is allocated locally free
				 * CE_state and src ring and return error.
				 */
				HIF_ERROR("%s: dest ring has no mem",
					  __func__);
				goto error_no_dma_mem;
			}

			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
					       dest_ring, attr);
			if (status < 0)
				goto error_target_access;

			ce_ring_test_initial_indexes(CE_id, dest_ring,
						     "dest_ring");

			/* For srng based target, init status ring here */
			if (ce_srng_based(CE_state->scn)) {
				CE_state->status_ring =
					ce_alloc_ring_state(CE_state,
							    CE_RING_STATUS,
							    nentries);
				if (CE_state->status_ring == NULL) {
					/* Allocation failed. Cleanup */
					qdf_mem_free(CE_state->dest_ring);
					if (malloc_src_ring) {
						qdf_mem_free
							(CE_state->src_ring);
						CE_state->src_ring = NULL;
						malloc_src_ring = false;
					}
					if (malloc_CE_state) {
						/* allocated CE_state locally */
						scn->ce_id_to_state[CE_id] =
							NULL;
						qdf_mem_free(CE_state);
						malloc_CE_state = false;
					}

					return NULL;
				}

				status = ce_ring_setup(scn, CE_RING_STATUS,
						CE_id, CE_state->status_ring,
						attr);
				if (status < 0)
					goto error_target_access;

			}

			/* epping */
			/* poll timer */
			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
				qdf_timer_init(scn->qdf_dev,
					       &CE_state->poll_timer,
					       ce_poll_timeout,
					       CE_state,
					       QDF_TIMER_TYPE_WAKE_APPS);
				ce_enable_polling(CE_state);
				qdf_timer_mod(&CE_state->poll_timer,
					      CE_POLL_TIMEOUT);
			}
		}
	}

	if (!ce_srng_based(scn)) {
		/* Enable CE error interrupts */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			goto error_target_access;
		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			goto error_target_access;
	}

	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
			ce_oom_recovery, CE_state);

	/* update the htt_data attribute */
	ce_mark_datapath(CE_state);
	scn->ce_id_to_state[CE_id] = CE_state;

	alloc_mem_ce_debug_history(scn, CE_id);

	return (struct CE_handle *)CE_state;

error_target_access:
error_no_dma_mem:
	ce_fini((struct CE_handle *)CE_state);
	return NULL;
}
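/*
 * A minimal usage sketch for ce_init() (illustration only; the attribute
 * values are hypothetical, real ones come from the host/target CE config
 * tables for the platform):
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 32,	// host->target ring
 *		.dest_nentries = 0,	// no target->host ring on this CE
 *		.src_sz_max = 2048,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, 3, &attr);
 *
 *	if (!ce_hdl)
 *		// allocation or target access failed; ce_init() has
 *		// already unwound whatever it allocated
 *		fail();
 */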
/**
 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
 * @hif_ctx: HIF Context
 *
 * API to check if polling is enabled on all CEs. Returns true when polling
 * is enabled on all CEs.
 *
 * Return: bool
 */
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *attr;
	int id;

	for (id = 0; id < scn->ce_count; id++) {
		attr = &hif_state->host_ce_config[id];
		if (attr && (attr->dest_nentries) &&
		    !(attr->flags & CE_ATTR_ENABLE_POLL))
			return false;
	}
	return true;
}
qdf_export_symbol(hif_is_polled_mode_enabled);

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_enable_fastpath() - update that we have enabled fastpath mode
 * @hif_ctx: HIF context
 *
 * For use in data path
 *
 * Return: void
 */
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn)) {
		HIF_INFO("%s, srng rings do not support fastpath", __func__);
		return;
	}
	HIF_DBG("%s, Enabling fastpath mode", __func__);
	scn->fastpath_mode_on = true;
}

/**
 * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
 * @hif_ctx: HIF Context
 *
 * For use in data path to skip HTC
 *
 * Return: bool
 */
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->fastpath_mode_on;
}

/**
 * hif_get_ce_handle - API to get CE handle for FastPath mode
 * @hif_ctx: HIF Context
 * @id: CopyEngine Id
 *
 * API to return CE handle for fastpath mode
 *
 * Return: CE handle
 */
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->ce_id_to_state[id];
}
qdf_export_symbol(hif_get_ce_handle);

/**
 * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
 * No processing is required inside this function.
 * @ce_hdl: Copy engine handle
 * Using an assert, this function makes sure that,
 * the TX CE has been processed completely.
 *
 * This is called while dismantling CE structures. No other thread
 * should be using these structures while dismantling is occurring
 * therefore no locking is needed.
 *
 * Return: none
 */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *sc = ce_state->scn;
	uint32_t sw_index, write_index;

	if (hif_is_nss_wifi_enabled(sc))
		return;

	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
			__func__, __LINE__);
		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;

		/* At this point Tx CE should be clean */
		qdf_assert_always(sw_index == write_index);
	}
}
/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (ce_state->scn->fastpath_mode_on == false)
		return;

	if (!ce_state->htt_rx_data)
		return;

	/*
	 * When fastpath mode is on, datapath CEs are kept completely full:
	 * unlike other CEs, no blank space is left to distinguish between
	 * an empty queue and a full queue. So free all the entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
		 *    This check accounts for that case; checking the
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered it. This is not in the performance path,
		 *    so it is OK to do this.
		 */
		if (nbuf) {
			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
		}
	}
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even at the beginning,
 * unlike other CE pipes where one less than dest_nentries is filled at init.
 *
 * Return: None
 */
static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
	int pipe_num;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */
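/*
 * Why "one less than dest_nentries" matters (numbers illustrative): with
 * an 8-entry dest ring, a normal pipe posts at most 7 recv buffers so
 * that sw_index == write_index can only mean "empty". Fastpath HTT rx
 * pipes post all 8 (hif_update_fastpath_recv_bufs_cnt() above accounts
 * for the extra buffer), which is why ce_t2h_msg_ce_cleanup() walks every
 * entry of the ring instead of stopping one short.
 */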
void ce_fini(struct CE_handle *copyeng)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	unsigned int CE_id = CE_state->id;
	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	/* Set the flag to false first to stop processing in ce_poll_timeout */
	ce_disable_polling(CE_state);

	qdf_lro_deinit(CE_state->lro_data);

	if (CE_state->src_ring) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
		if (CE_state->src_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->src_ring->shadow_base_unaligned);
		if (CE_state->src_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->src_ring,
					  desc_size);
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->dest_ring,
					  desc_size);
		qdf_mem_free(CE_state->dest_ring);

		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
	}
	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->status_ring->shadow_base_unaligned)
			qdf_mem_free(
				CE_state->status_ring->shadow_base_unaligned);

		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
		if (CE_state->status_ring->base_addr_owner_space_unaligned)
			ce_free_desc_ring(scn, CE_state->id,
					  CE_state->status_ring,
					  desc_size);
		qdf_mem_free(CE_state->status_ring);
	}

	free_mem_ce_debug_history(scn, CE_id);
	reset_ce_debug_history(scn);
	ce_deinit_ce_desc_event_log(scn, CE_id);

	qdf_spinlock_destroy(&CE_state->ce_index_lock);
	qdf_mem_free(CE_state);
}

void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_mem_zero(&hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
	qdf_mem_zero(&hif_state->msg_callbacks_current,
		     sizeof(hif_state->msg_callbacks_current));
}
					     0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
		/*
		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
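		 * Intermediate sendlist fragments only return their send
		 * credit to the pipe (num_sends_allowed below).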
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
					&ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&sw_idx, &hw_idx,
					&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuf,
 * and calls the upper layer callback.
 *
 * Return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info)
{
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->rxCompletionHandler(msg_callbacks->Context,
						   netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target.
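 * Each pass of the loop below unmaps one completed buffer, asks
 * hif_post_recv_buffers_for_pipe() for a replacement, and hands the
 * data up (or frees it while the target is in reset).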
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set the force_break flag if the number of receives
		 * reaches MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		     sizeof(hif_state->msg_callbacks_pending));

}

static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue; /* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if
(attr.dest_nentries) { 2217 /* pipe used to receive from target */ 2218 ce_recv_cb_register(pipe_info->ce_hdl, 2219 hif_pci_ce_recv_data, pipe_info, 2220 attr.flags & CE_ATTR_DISABLE_INTR); 2221 } 2222 2223 if (attr.src_nentries) 2224 qdf_spinlock_create(&pipe_info->completion_freeq_lock); 2225 2226 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 2227 sizeof(pipe_info->pipe_callbacks)); 2228 } 2229 2230 A_TARGET_ACCESS_UNLIKELY(scn); 2231 return 0; 2232 } 2233 2234 /* 2235 * Install pending msg callbacks. 2236 * 2237 * TBDXXX: This hack is needed because upper layers install msg callbacks 2238 * for use with HTC before BMI is done; yet this HIF implementation 2239 * needs to continue to use BMI msg callbacks. Really, upper layers 2240 * should not register HTC callbacks until AFTER BMI phase. 2241 */ 2242 static void hif_msg_callbacks_install(struct hif_softc *scn) 2243 { 2244 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2245 2246 qdf_mem_copy(&hif_state->msg_callbacks_current, 2247 &hif_state->msg_callbacks_pending, 2248 sizeof(hif_state->msg_callbacks_pending)); 2249 } 2250 2251 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 2252 uint8_t *DLPipe) 2253 { 2254 int ul_is_polled, dl_is_polled; 2255 2256 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 2257 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 2258 } 2259 2260 /** 2261 * hif_dump_pipe_debug_count() - Log error count 2262 * @scn: hif_softc pointer. 2263 * 2264 * Output the pipe error counts of each pipe to log file 2265 * 2266 * Return: N/A 2267 */ 2268 void hif_dump_pipe_debug_count(struct hif_softc *scn) 2269 { 2270 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2271 int pipe_num; 2272 2273 if (hif_state == NULL) { 2274 HIF_ERROR("%s hif_state is NULL", __func__); 2275 return; 2276 } 2277 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2278 struct HIF_CE_pipe_info *pipe_info; 2279 2280 pipe_info = &hif_state->pipe_info[pipe_num]; 2281 2282 if (pipe_info->nbuf_alloc_err_count > 0 || 2283 pipe_info->nbuf_dma_err_count > 0 || 2284 pipe_info->nbuf_ce_enqueue_err_count) 2285 HIF_ERROR( 2286 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 2287 __func__, pipe_info->pipe_num, 2288 atomic_read(&pipe_info->recv_bufs_needed), 2289 pipe_info->nbuf_alloc_err_count, 2290 pipe_info->nbuf_dma_err_count, 2291 pipe_info->nbuf_ce_enqueue_err_count); 2292 } 2293 } 2294 2295 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 2296 void *nbuf, uint32_t *error_cnt, 2297 enum hif_ce_event_type failure_type, 2298 const char *failure_type_string) 2299 { 2300 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 2301 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 2302 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2303 int ce_id = CE_state->id; 2304 uint32_t error_cnt_tmp; 2305 2306 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2307 error_cnt_tmp = ++(*error_cnt); 2308 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2309 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", 2310 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 2311 failure_type_string); 2312 hif_record_ce_desc_event(scn, ce_id, failure_type, 2313 NULL, nbuf, bufs_needed_tmp, 0); 2314 /* if we fail to allocate the last buffer for an rx pipe, 2315 * there is no trigger to refill the ce and we will 2316 * eventually 
crash 2317 */ 2318 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) 2319 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 2320 2321 } 2322 2323 2324 2325 2326 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 2327 { 2328 struct CE_handle *ce_hdl; 2329 qdf_size_t buf_sz; 2330 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2331 QDF_STATUS status; 2332 uint32_t bufs_posted = 0; 2333 2334 buf_sz = pipe_info->buf_sz; 2335 if (buf_sz == 0) { 2336 /* Unused Copy Engine */ 2337 return QDF_STATUS_SUCCESS; 2338 } 2339 2340 ce_hdl = pipe_info->ce_hdl; 2341 2342 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2343 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 2344 qdf_dma_addr_t CE_data; /* CE space buffer address */ 2345 qdf_nbuf_t nbuf; 2346 2347 atomic_dec(&pipe_info->recv_bufs_needed); 2348 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2349 2350 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 2351 if (!nbuf) { 2352 hif_post_recv_buffers_failure(pipe_info, nbuf, 2353 &pipe_info->nbuf_alloc_err_count, 2354 HIF_RX_NBUF_ALLOC_FAILURE, 2355 "HIF_RX_NBUF_ALLOC_FAILURE"); 2356 return QDF_STATUS_E_NOMEM; 2357 } 2358 2359 /* 2360 * qdf_nbuf_peek_header(nbuf, &data, &unused); 2361 * CE_data = dma_map_single(dev, data, buf_sz, ); 2362 * DMA_FROM_DEVICE); 2363 */ 2364 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 2365 QDF_DMA_FROM_DEVICE); 2366 2367 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2368 hif_post_recv_buffers_failure(pipe_info, nbuf, 2369 &pipe_info->nbuf_dma_err_count, 2370 HIF_RX_NBUF_MAP_FAILURE, 2371 "HIF_RX_NBUF_MAP_FAILURE"); 2372 qdf_nbuf_free(nbuf); 2373 return status; 2374 } 2375 2376 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 2377 2378 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 2379 buf_sz, DMA_FROM_DEVICE); 2380 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 2381 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2382 hif_post_recv_buffers_failure(pipe_info, nbuf, 2383 &pipe_info->nbuf_ce_enqueue_err_count, 2384 HIF_RX_NBUF_ENQUEUE_FAILURE, 2385 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 2386 2387 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 2388 QDF_DMA_FROM_DEVICE); 2389 qdf_nbuf_free(nbuf); 2390 return status; 2391 } 2392 2393 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2394 bufs_posted++; 2395 } 2396 pipe_info->nbuf_alloc_err_count = 2397 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 2398 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 2399 pipe_info->nbuf_dma_err_count = 2400 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 2401 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 2402 pipe_info->nbuf_ce_enqueue_err_count = 2403 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 2404 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 2405 2406 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2407 2408 return QDF_STATUS_SUCCESS; 2409 } 2410 2411 /* 2412 * Try to post all desired receive buffers for all pipes. 2413 * Returns 0 for non fastpath rx copy engine as 2414 * oom_allocation_work will be scheduled to recover any 2415 * failures, non-zero if unable to completely replenish 2416 * receive buffers for fastpath rx Copy engine. 
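 * Pipes whose buffers are recycled in place (htt_rx_data pipes when nss
 * wifi offload is enabled) are skipped here.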
2417 */ 2418 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 2419 { 2420 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2421 int pipe_num; 2422 struct CE_state *ce_state = NULL; 2423 QDF_STATUS qdf_status; 2424 2425 A_TARGET_ACCESS_LIKELY(scn); 2426 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2427 struct HIF_CE_pipe_info *pipe_info; 2428 2429 ce_state = scn->ce_id_to_state[pipe_num]; 2430 pipe_info = &hif_state->pipe_info[pipe_num]; 2431 2432 if (hif_is_nss_wifi_enabled(scn) && 2433 ce_state && (ce_state->htt_rx_data)) 2434 continue; 2435 2436 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 2437 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 2438 ce_state->htt_rx_data && 2439 scn->fastpath_mode_on) { 2440 A_TARGET_ACCESS_UNLIKELY(scn); 2441 return qdf_status; 2442 } 2443 } 2444 2445 A_TARGET_ACCESS_UNLIKELY(scn); 2446 2447 return QDF_STATUS_SUCCESS; 2448 } 2449 2450 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 2451 { 2452 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2453 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2454 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 2455 2456 hif_update_fastpath_recv_bufs_cnt(scn); 2457 2458 hif_msg_callbacks_install(scn); 2459 2460 if (hif_completion_thread_startup(hif_state)) 2461 return QDF_STATUS_E_FAILURE; 2462 2463 /* enable buffer cleanup */ 2464 hif_state->started = true; 2465 2466 /* Post buffers once to start things off. */ 2467 qdf_status = hif_post_recv_buffers(scn); 2468 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2469 /* cleanup is done in hif_ce_disable */ 2470 HIF_ERROR("%s:failed to post buffers", __func__); 2471 return qdf_status; 2472 } 2473 2474 return qdf_status; 2475 } 2476 2477 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2478 { 2479 struct hif_softc *scn; 2480 struct CE_handle *ce_hdl; 2481 uint32_t buf_sz; 2482 struct HIF_CE_state *hif_state; 2483 qdf_nbuf_t netbuf; 2484 qdf_dma_addr_t CE_data; 2485 void *per_CE_context; 2486 2487 buf_sz = pipe_info->buf_sz; 2488 /* Unused Copy Engine */ 2489 if (buf_sz == 0) 2490 return; 2491 2492 2493 hif_state = pipe_info->HIF_CE_state; 2494 if (!hif_state->started) 2495 return; 2496 2497 scn = HIF_GET_SOFTC(hif_state); 2498 ce_hdl = pipe_info->ce_hdl; 2499 2500 if (scn->qdf_dev == NULL) 2501 return; 2502 while (ce_revoke_recv_next 2503 (ce_hdl, &per_CE_context, (void **)&netbuf, 2504 &CE_data) == QDF_STATUS_SUCCESS) { 2505 if (netbuf) { 2506 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 2507 QDF_DMA_FROM_DEVICE); 2508 qdf_nbuf_free(netbuf); 2509 } 2510 } 2511 } 2512 2513 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2514 { 2515 struct CE_handle *ce_hdl; 2516 struct HIF_CE_state *hif_state; 2517 struct hif_softc *scn; 2518 qdf_nbuf_t netbuf; 2519 void *per_CE_context; 2520 qdf_dma_addr_t CE_data; 2521 unsigned int nbytes; 2522 unsigned int id; 2523 uint32_t buf_sz; 2524 uint32_t toeplitz_hash_result; 2525 2526 buf_sz = pipe_info->buf_sz; 2527 if (buf_sz == 0) { 2528 /* Unused Copy Engine */ 2529 return; 2530 } 2531 2532 hif_state = pipe_info->HIF_CE_state; 2533 if (!hif_state->started) { 2534 return; 2535 } 2536 2537 scn = HIF_GET_SOFTC(hif_state); 2538 2539 ce_hdl = pipe_info->ce_hdl; 2540 2541 while (ce_cancel_send_next 2542 (ce_hdl, &per_CE_context, 2543 (void **)&netbuf, &CE_data, &nbytes, 2544 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 2545 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 2546 /* 2547 * Packets enqueued by htt_h2t_ver_req_msg() and 2548 * 
htt_h2t_rx_ring_cfg_msg_ll() have already been 2549 * freed in htt_htc_misc_pkt_pool_free() in 2550 * wlantl_close(), so do not free them here again 2551 * by checking whether it's the endpoint 2552 * which they are queued in. 2553 */ 2554 if (id == scn->htc_htt_tx_endpoint) 2555 return; 2556 /* Indicate the completion to higher 2557 * layer to free the buffer 2558 */ 2559 if (pipe_info->pipe_callbacks.txCompletionHandler) 2560 pipe_info->pipe_callbacks. 2561 txCompletionHandler(pipe_info-> 2562 pipe_callbacks.Context, 2563 netbuf, id, toeplitz_hash_result); 2564 } 2565 } 2566 } 2567 2568 /* 2569 * Cleanup residual buffers for device shutdown: 2570 * buffers that were enqueued for receive 2571 * buffers that were to be sent 2572 * Note: Buffers that had completed but which were 2573 * not yet processed are on a completion queue. They 2574 * are handled when the completion thread shuts down. 2575 */ 2576 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 2577 { 2578 int pipe_num; 2579 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2580 struct CE_state *ce_state; 2581 2582 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2583 struct HIF_CE_pipe_info *pipe_info; 2584 2585 ce_state = scn->ce_id_to_state[pipe_num]; 2586 if (hif_is_nss_wifi_enabled(scn) && ce_state && 2587 ((ce_state->htt_tx_data) || 2588 (ce_state->htt_rx_data))) { 2589 continue; 2590 } 2591 2592 pipe_info = &hif_state->pipe_info[pipe_num]; 2593 hif_recv_buffer_cleanup_on_pipe(pipe_info); 2594 hif_send_buffer_cleanup_on_pipe(pipe_info); 2595 } 2596 } 2597 2598 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 2599 { 2600 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2601 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2602 2603 hif_buffer_cleanup(hif_state); 2604 } 2605 2606 static void hif_destroy_oom_work(struct hif_softc *scn) 2607 { 2608 struct CE_state *ce_state; 2609 int ce_id; 2610 2611 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 2612 ce_state = scn->ce_id_to_state[ce_id]; 2613 if (ce_state) 2614 qdf_destroy_work(scn->qdf_dev, 2615 &ce_state->oom_allocation_work); 2616 } 2617 } 2618 2619 void hif_ce_stop(struct hif_softc *scn) 2620 { 2621 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2622 int pipe_num; 2623 2624 /* 2625 * before cleaning up any memory, ensure irq & 2626 * bottom half contexts will not be re-entered 2627 */ 2628 hif_disable_isr(&scn->osc); 2629 hif_destroy_oom_work(scn); 2630 scn->hif_init_done = false; 2631 2632 /* 2633 * At this point, asynchronous threads are stopped, 2634 * The Target should not DMA nor interrupt, Host code may 2635 * not initiate anything more. So we just need to clean 2636 * up Host-side state. 
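 * That means reclaiming outstanding receive and send buffers,
 * finalizing each CE, and freeing the sleep timer below.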
2637 */ 2638 2639 if (scn->athdiag_procfs_inited) { 2640 athdiag_procfs_remove(); 2641 scn->athdiag_procfs_inited = false; 2642 } 2643 2644 hif_buffer_cleanup(hif_state); 2645 2646 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2647 struct HIF_CE_pipe_info *pipe_info; 2648 struct CE_attr attr; 2649 struct CE_handle *ce_diag = hif_state->ce_diag; 2650 2651 pipe_info = &hif_state->pipe_info[pipe_num]; 2652 if (pipe_info->ce_hdl) { 2653 if (pipe_info->ce_hdl != ce_diag) { 2654 attr = hif_state->host_ce_config[pipe_num]; 2655 if (attr.src_nentries) 2656 qdf_spinlock_destroy(&pipe_info-> 2657 completion_freeq_lock); 2658 } 2659 ce_fini(pipe_info->ce_hdl); 2660 pipe_info->ce_hdl = NULL; 2661 pipe_info->buf_sz = 0; 2662 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2663 } 2664 } 2665 2666 if (hif_state->sleep_timer_init) { 2667 qdf_timer_stop(&hif_state->sleep_timer); 2668 qdf_timer_free(&hif_state->sleep_timer); 2669 hif_state->sleep_timer_init = false; 2670 } 2671 2672 hif_state->started = false; 2673 } 2674 2675 #ifdef QCN7605_SUPPORT 2676 static inline 2677 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg 2678 **target_shadow_reg_cfg_ret, 2679 uint32_t *shadow_cfg_sz_ret) 2680 { 2681 if (target_shadow_reg_cfg_ret) 2682 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605; 2683 if (shadow_cfg_sz_ret) 2684 *shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605); 2685 } 2686 #else 2687 static inline 2688 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg 2689 **target_shadow_reg_cfg_ret, 2690 uint32_t *shadow_cfg_sz_ret) 2691 { 2692 HIF_ERROR("QCN7605 not supported"); 2693 } 2694 #endif 2695 2696 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 2697 struct shadow_reg_cfg 2698 **target_shadow_reg_cfg_ret, 2699 uint32_t *shadow_cfg_sz_ret) 2700 { 2701 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2702 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 2703 2704 switch (tgt_info->target_type) { 2705 case TARGET_TYPE_QCN7605: 2706 hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret, 2707 shadow_cfg_sz_ret); 2708 break; 2709 default: 2710 if (target_shadow_reg_cfg_ret) 2711 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 2712 if (shadow_cfg_sz_ret) 2713 *shadow_cfg_sz_ret = shadow_cfg_sz; 2714 } 2715 } 2716 2717 /** 2718 * hif_get_target_ce_config() - get copy engine configuration 2719 * @target_ce_config_ret: basic copy engine configuration 2720 * @target_ce_config_sz_ret: size of the basic configuration in bytes 2721 * @target_service_to_ce_map_ret: service mapping for the copy engines 2722 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 2723 * @target_shadow_reg_cfg_ret: shadow register configuration 2724 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 2725 * 2726 * providing accessor to these values outside of this file. 2727 * currently these are stored in static pointers to const sections. 2728 * there are multiple configurations that are selected from at compile time. 2729 * Runtime selection would need to consider mode, target type and bus type. 2730 * 2731 * Return: return by parameter. 
2732 */ 2733 void hif_get_target_ce_config(struct hif_softc *scn, 2734 struct CE_pipe_config **target_ce_config_ret, 2735 uint32_t *target_ce_config_sz_ret, 2736 struct service_to_pipe **target_service_to_ce_map_ret, 2737 uint32_t *target_service_to_ce_map_sz_ret, 2738 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 2739 uint32_t *shadow_cfg_sz_ret) 2740 { 2741 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2742 2743 *target_ce_config_ret = hif_state->target_ce_config; 2744 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 2745 2746 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 2747 target_service_to_ce_map_sz_ret); 2748 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, 2749 shadow_cfg_sz_ret); 2750 } 2751 2752 #ifdef CONFIG_SHADOW_V2 2753 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2754 { 2755 int i; 2756 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2757 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); 2758 2759 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 2760 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2761 "%s: i %d, val %x", __func__, i, 2762 cfg->shadow_reg_v2_cfg[i].addr); 2763 } 2764 } 2765 2766 #else 2767 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2768 { 2769 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2770 "%s: CONFIG_SHADOW_V2 not defined", __func__); 2771 } 2772 #endif 2773 2774 /** 2775 * hif_wlan_enable(): call the platform driver to enable wlan 2776 * @scn: HIF Context 2777 * 2778 * This function passes the con_mode and CE configuration to 2779 * platform driver to enable wlan. 2780 * 2781 * Return: linux error code 2782 */ 2783 int hif_wlan_enable(struct hif_softc *scn) 2784 { 2785 struct pld_wlan_enable_cfg cfg; 2786 enum pld_driver_mode mode; 2787 uint32_t con_mode = hif_get_conparam(scn); 2788 2789 hif_get_target_ce_config(scn, 2790 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 2791 &cfg.num_ce_tgt_cfg, 2792 (struct service_to_pipe **)&cfg.ce_svc_cfg, 2793 &cfg.num_ce_svc_pipe_cfg, 2794 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 2795 &cfg.num_shadow_reg_cfg); 2796 2797 /* translate from structure size to array size */ 2798 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 2799 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 2800 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 2801 2802 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, 2803 &cfg.num_shadow_reg_v2_cfg); 2804 2805 hif_print_hal_shadow_register_cfg(&cfg); 2806 2807 if (QDF_GLOBAL_FTM_MODE == con_mode) 2808 mode = PLD_FTM; 2809 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 2810 mode = PLD_COLDBOOT_CALIBRATION; 2811 else if (QDF_IS_EPPING_ENABLED(con_mode)) 2812 mode = PLD_EPPING; 2813 else 2814 mode = PLD_MISSION; 2815 2816 if (BYPASS_QMI) 2817 return 0; 2818 else 2819 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, 2820 mode, QWLAN_VERSIONSTR); 2821 } 2822 2823 #ifdef WLAN_FEATURE_EPPING 2824 2825 #define CE_EPPING_USES_IRQ true 2826 2827 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) 2828 { 2829 if (CE_EPPING_USES_IRQ) 2830 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 2831 else 2832 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 2833 hif_state->target_ce_config = target_ce_config_wlan_epping; 2834 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 2835 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 2836 shadow_cfg_sz = 
sizeof(target_shadow_reg_cfg_epping); 2837 } 2838 #endif 2839 2840 #ifdef QCN7605_SUPPORT 2841 static inline 2842 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 2843 struct HIF_CE_state *hif_state) 2844 { 2845 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 2846 hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 2847 hif_state->target_ce_config_sz = 2848 sizeof(target_ce_config_wlan_qcn7605); 2849 scn->ce_count = QCN7605_CE_COUNT; 2850 } 2851 #else 2852 static inline 2853 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 2854 struct HIF_CE_state *hif_state) 2855 { 2856 HIF_ERROR("QCN7605 not supported"); 2857 } 2858 #endif 2859 2860 #ifdef CE_SVC_CMN_INIT 2861 #ifdef QCA_WIFI_SUPPORT_SRNG 2862 static inline void hif_ce_service_init(void) 2863 { 2864 ce_service_srng_init(); 2865 } 2866 #else 2867 static inline void hif_ce_service_init(void) 2868 { 2869 ce_service_legacy_init(); 2870 } 2871 #endif 2872 #else 2873 static inline void hif_ce_service_init(void) 2874 { 2875 } 2876 #endif 2877 2878 2879 /** 2880 * hif_ce_prepare_config() - load the correct static tables. 2881 * @scn: hif context 2882 * 2883 * Epping uses different static attribute tables than mission mode. 2884 */ 2885 void hif_ce_prepare_config(struct hif_softc *scn) 2886 { 2887 uint32_t mode = hif_get_conparam(scn); 2888 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 2889 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 2890 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2891 2892 hif_ce_service_init(); 2893 hif_state->ce_services = ce_services_attach(scn); 2894 2895 scn->ce_count = HOST_CE_COUNT; 2896 /* if epping is enabled we need to use the epping configuration. */ 2897 if (QDF_IS_EPPING_ENABLED(mode)) { 2898 hif_ce_prepare_epping_config(hif_state); 2899 } 2900 2901 switch (tgt_info->target_type) { 2902 default: 2903 hif_state->host_ce_config = host_ce_config_wlan; 2904 hif_state->target_ce_config = target_ce_config_wlan; 2905 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 2906 break; 2907 case TARGET_TYPE_QCN7605: 2908 hif_set_ce_config_qcn7605(scn, hif_state); 2909 break; 2910 case TARGET_TYPE_AR900B: 2911 case TARGET_TYPE_QCA9984: 2912 case TARGET_TYPE_IPQ4019: 2913 case TARGET_TYPE_QCA9888: 2914 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 2915 hif_state->host_ce_config = 2916 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 2917 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2918 hif_state->host_ce_config = 2919 host_lowdesc_ce_cfg_wlan_ar900b; 2920 } else { 2921 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 2922 } 2923 2924 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 2925 hif_state->target_ce_config_sz = 2926 sizeof(target_ce_config_wlan_ar900b); 2927 2928 break; 2929 2930 case TARGET_TYPE_AR9888: 2931 case TARGET_TYPE_AR9888V2: 2932 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 2933 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 2934 } else { 2935 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 2936 } 2937 2938 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 2939 hif_state->target_ce_config_sz = 2940 sizeof(target_ce_config_wlan_ar9888); 2941 2942 break; 2943 2944 case TARGET_TYPE_QCA8074: 2945 case TARGET_TYPE_QCA8074V2: 2946 case TARGET_TYPE_QCA6018: 2947 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 2948 hif_state->host_ce_config = 2949 host_ce_config_wlan_qca8074_pci; 2950 hif_state->target_ce_config = 2951 
target_ce_config_wlan_qca8074_pci; 2952 hif_state->target_ce_config_sz = 2953 sizeof(target_ce_config_wlan_qca8074_pci); 2954 } else { 2955 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 2956 hif_state->target_ce_config = 2957 target_ce_config_wlan_qca8074; 2958 hif_state->target_ce_config_sz = 2959 sizeof(target_ce_config_wlan_qca8074); 2960 } 2961 break; 2962 case TARGET_TYPE_QCA6290: 2963 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 2964 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 2965 hif_state->target_ce_config_sz = 2966 sizeof(target_ce_config_wlan_qca6290); 2967 2968 scn->ce_count = QCA_6290_CE_COUNT; 2969 break; 2970 case TARGET_TYPE_QCA6390: 2971 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 2972 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 2973 hif_state->target_ce_config_sz = 2974 sizeof(target_ce_config_wlan_qca6390); 2975 2976 scn->ce_count = QCA_6390_CE_COUNT; 2977 break; 2978 } 2979 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 2980 } 2981 2982 /** 2983 * hif_ce_open() - do ce specific allocations 2984 * @hif_sc: pointer to hif context 2985 * 2986 * return: 0 for success or QDF_STATUS_E_NOMEM 2987 */ 2988 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 2989 { 2990 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 2991 2992 qdf_spinlock_create(&hif_state->irq_reg_lock); 2993 qdf_spinlock_create(&hif_state->keep_awake_lock); 2994 return QDF_STATUS_SUCCESS; 2995 } 2996 2997 /** 2998 * hif_ce_close() - do ce specific free 2999 * @hif_sc: pointer to hif context 3000 */ 3001 void hif_ce_close(struct hif_softc *hif_sc) 3002 { 3003 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3004 3005 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 3006 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 3007 } 3008 3009 /** 3010 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 3011 * @hif_sc: hif context 3012 * 3013 * uses state variables to support cleaning up when hif_config_ce fails. 3014 */ 3015 void hif_unconfig_ce(struct hif_softc *hif_sc) 3016 { 3017 int pipe_num; 3018 struct HIF_CE_pipe_info *pipe_info; 3019 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3020 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 3021 3022 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3023 pipe_info = &hif_state->pipe_info[pipe_num]; 3024 if (pipe_info->ce_hdl) { 3025 ce_unregister_irq(hif_state, (1 << pipe_num)); 3026 } 3027 } 3028 deinit_tasklet_workers(hif_hdl); 3029 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3030 pipe_info = &hif_state->pipe_info[pipe_num]; 3031 if (pipe_info->ce_hdl) { 3032 ce_fini(pipe_info->ce_hdl); 3033 pipe_info->ce_hdl = NULL; 3034 pipe_info->buf_sz = 0; 3035 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3036 } 3037 } 3038 if (hif_sc->athdiag_procfs_inited) { 3039 athdiag_procfs_remove(); 3040 hif_sc->athdiag_procfs_inited = false; 3041 } 3042 } 3043 3044 #ifdef CONFIG_BYPASS_QMI 3045 #ifdef QCN7605_SUPPORT 3046 /** 3047 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3048 * @scn: pointer to HIF structure 3049 * 3050 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
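 * The buffer's physical address is advertised to the firmware through
 * the BYPASS_QMI_TEMP_REGISTER write below.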
3051 * 3052 * Return: void 3053 */ 3054 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3055 { 3056 void *target_va; 3057 phys_addr_t target_pa; 3058 struct ce_info *ce_info_ptr; 3059 uint32_t msi_data_start; 3060 uint32_t msi_data_count; 3061 uint32_t msi_irq_start; 3062 uint32_t i = 0; 3063 int ret; 3064 3065 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, 3066 scn->qdf_dev->dev, 3067 FW_SHARED_MEM + 3068 sizeof(struct ce_info), 3069 &target_pa); 3070 if (!target_va) 3071 return; 3072 3073 ce_info_ptr = (struct ce_info *)target_va; 3074 3075 if (scn->vaddr_rri_on_ddr) { 3076 ce_info_ptr->rri_over_ddr_low_paddr = 3077 BITS0_TO_31(scn->paddr_rri_on_ddr); 3078 ce_info_ptr->rri_over_ddr_high_paddr = 3079 BITS32_TO_35(scn->paddr_rri_on_ddr); 3080 } 3081 3082 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3083 &msi_data_count, &msi_data_start, 3084 &msi_irq_start); 3085 if (ret) { 3086 hif_err("Failed to get CE msi config"); 3087 return; 3088 } 3089 3090 for (i = 0; i < CE_COUNT_MAX; i++) { 3091 ce_info_ptr->cfg[i].ce_id = i; 3092 ce_info_ptr->cfg[i].msi_vector = 3093 (i % msi_data_count) + msi_irq_start; 3094 } 3095 3096 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3097 hif_info("target va %pK target pa %pa", target_va, &target_pa); 3098 } 3099 #else 3100 /** 3101 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3102 * @scn: pointer to HIF structure 3103 * 3104 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 3105 * 3106 * Return: void 3107 */ 3108 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3109 { 3110 void *target_va; 3111 phys_addr_t target_pa; 3112 3113 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 3114 FW_SHARED_MEM, &target_pa); 3115 if (NULL == target_va) { 3116 HIF_TRACE("Memory allocation failed could not post target buf"); 3117 return; 3118 } 3119 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3120 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); 3121 } 3122 #endif 3123 3124 #else 3125 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 3126 { 3127 } 3128 #endif 3129 3130 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 3131 bool wait_for_it) 3132 { 3133 /* todo */ 3134 return 0; 3135 } 3136 3137 /** 3138 * hif_config_ce() - configure copy engines 3139 * @scn: hif context 3140 * 3141 * Prepares fw, copy engine hardware and host sw according 3142 * to the attributes selected by hif_ce_prepare_config. 3143 * 3144 * also calls athdiag_procfs_init 3145 * 3146 * return: 0 for success nonzero for failure. 3147 */ 3148 int hif_config_ce(struct hif_softc *scn) 3149 { 3150 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3151 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3152 struct HIF_CE_pipe_info *pipe_info; 3153 int pipe_num; 3154 struct CE_state *ce_state = NULL; 3155 3156 #ifdef ADRASTEA_SHADOW_REGISTERS 3157 int i; 3158 #endif 3159 QDF_STATUS rv = QDF_STATUS_SUCCESS; 3160 3161 scn->notice_send = true; 3162 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 3163 3164 hif_post_static_buf_to_target(scn); 3165 3166 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 3167 3168 hif_config_rri_on_ddr(scn); 3169 3170 if (ce_srng_based(scn)) 3171 scn->bus_ops.hif_target_sleep_state_adjust = 3172 &hif_srng_sleep_state_adjust; 3173 3174 /* Initialise the CE debug history sysfs interface inputs ce_id and 3175 * index. 
Disable data storing 3176 */ 3177 reset_ce_debug_history(scn); 3178 3179 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3180 struct CE_attr *attr; 3181 3182 pipe_info = &hif_state->pipe_info[pipe_num]; 3183 pipe_info->pipe_num = pipe_num; 3184 pipe_info->HIF_CE_state = hif_state; 3185 attr = &hif_state->host_ce_config[pipe_num]; 3186 3187 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 3188 ce_state = scn->ce_id_to_state[pipe_num]; 3189 if (!ce_state) { 3190 A_TARGET_ACCESS_UNLIKELY(scn); 3191 goto err; 3192 } 3193 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 3194 QDF_ASSERT(pipe_info->ce_hdl != NULL); 3195 if (pipe_info->ce_hdl == NULL) { 3196 rv = QDF_STATUS_E_FAILURE; 3197 A_TARGET_ACCESS_UNLIKELY(scn); 3198 goto err; 3199 } 3200 3201 ce_state->lro_data = qdf_lro_init(); 3202 3203 if (attr->flags & CE_ATTR_DIAG) { 3204 /* Reserve the ultimate CE for 3205 * Diagnostic Window support 3206 */ 3207 hif_state->ce_diag = pipe_info->ce_hdl; 3208 continue; 3209 } 3210 3211 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3212 (ce_state->htt_rx_data)) 3213 continue; 3214 3215 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); 3216 if (attr->dest_nentries > 0) { 3217 atomic_set(&pipe_info->recv_bufs_needed, 3218 init_buffer_count(attr->dest_nentries - 1)); 3219 /*SRNG based CE has one entry less */ 3220 if (ce_srng_based(scn)) 3221 atomic_dec(&pipe_info->recv_bufs_needed); 3222 } else { 3223 atomic_set(&pipe_info->recv_bufs_needed, 0); 3224 } 3225 ce_tasklet_init(hif_state, (1 << pipe_num)); 3226 ce_register_irq(hif_state, (1 << pipe_num)); 3227 } 3228 3229 if (athdiag_procfs_init(scn) != 0) { 3230 A_TARGET_ACCESS_UNLIKELY(scn); 3231 goto err; 3232 } 3233 scn->athdiag_procfs_inited = true; 3234 3235 HIF_DBG("%s: ce_init done", __func__); 3236 3237 init_tasklet_workers(hif_hdl); 3238 3239 HIF_DBG("%s: X, ret = %d", __func__, rv); 3240 3241 #ifdef ADRASTEA_SHADOW_REGISTERS 3242 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); 3243 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 3244 HIF_DBG("%s Shadow Register%d is mapped to address %x", 3245 __func__, i, 3246 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 3247 } 3248 #endif 3249 3250 return rv != QDF_STATUS_SUCCESS; 3251 3252 err: 3253 /* Failure, so clean up */ 3254 hif_unconfig_ce(scn); 3255 HIF_TRACE("%s: X, ret = %d", __func__, rv); 3256 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 3257 } 3258 3259 #ifdef IPA_OFFLOAD 3260 /** 3261 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 3262 * @scn: bus context 3263 * @ce_sr_base_paddr: copyengine source ring base physical address 3264 * @ce_sr_ring_size: copyengine source ring size 3265 * @ce_reg_paddr: copyengine register physical address 3266 * 3267 * IPA micro controller data path offload feature enabled, 3268 * HIF should release copy engine related resource information to IPA UC 3269 * IPA UC will access hardware resource with released information 3270 * 3271 * Return: None 3272 */ 3273 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 3274 qdf_shared_mem_t **ce_sr, 3275 uint32_t *ce_sr_ring_size, 3276 qdf_dma_addr_t *ce_reg_paddr) 3277 { 3278 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3279 struct HIF_CE_pipe_info *pipe_info = 3280 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 3281 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3282 3283 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 3284 ce_reg_paddr); 3285 } 3286 #endif /* IPA_OFFLOAD */ 3287 3288 3289 #ifdef ADRASTEA_SHADOW_REGISTERS 3290 3291 /* 3292 * 
Current shadow register config 3293 * 3294 * ----------------------------------------------------------- 3295 * Shadow Register | CE | src/dst write index 3296 * ----------------------------------------------------------- 3297 * 0 | 0 | src 3298 * 1 No Config - Doesn't point to anything 3299 * 2 No Config - Doesn't point to anything 3300 * 3 | 3 | src 3301 * 4 | 4 | src 3302 * 5 | 5 | src 3303 * 6 No Config - Doesn't point to anything 3304 * 7 | 7 | src 3305 * 8 No Config - Doesn't point to anything 3306 * 9 No Config - Doesn't point to anything 3307 * 10 No Config - Doesn't point to anything 3308 * 11 No Config - Doesn't point to anything 3309 * ----------------------------------------------------------- 3310 * 12 No Config - Doesn't point to anything 3311 * 13 | 1 | dst 3312 * 14 | 2 | dst 3313 * 15 No Config - Doesn't point to anything 3314 * 16 No Config - Doesn't point to anything 3315 * 17 No Config - Doesn't point to anything 3316 * 18 No Config - Doesn't point to anything 3317 * 19 | 7 | dst 3318 * 20 | 8 | dst 3319 * 21 No Config - Doesn't point to anything 3320 * 22 No Config - Doesn't point to anything 3321 * 23 No Config - Doesn't point to anything 3322 * ----------------------------------------------------------- 3323 * 3324 * 3325 * ToDo - Move shadow register config to following in the future 3326 * This helps free up a block of shadow registers towards the end. 3327 * Can be used for other purposes 3328 * 3329 * ----------------------------------------------------------- 3330 * Shadow Register | CE | src/dst write index 3331 * ----------------------------------------------------------- 3332 * 0 | 0 | src 3333 * 1 | 3 | src 3334 * 2 | 4 | src 3335 * 3 | 5 | src 3336 * 4 | 7 | src 3337 * ----------------------------------------------------------- 3338 * 5 | 1 | dst 3339 * 6 | 2 | dst 3340 * 7 | 7 | dst 3341 * 8 | 8 | dst 3342 * ----------------------------------------------------------- 3343 * 9 No Config - Doesn't point to anything 3344 * 12 No Config - Doesn't point to anything 3345 * 13 No Config - Doesn't point to anything 3346 * 14 No Config - Doesn't point to anything 3347 * 15 No Config - Doesn't point to anything 3348 * 16 No Config - Doesn't point to anything 3349 * 17 No Config - Doesn't point to anything 3350 * 18 No Config - Doesn't point to anything 3351 * 19 No Config - Doesn't point to anything 3352 * 20 No Config - Doesn't point to anything 3353 * 21 No Config - Doesn't point to anything 3354 * 22 No Config - Doesn't point to anything 3355 * 23 No Config - Doesn't point to anything 3356 * ----------------------------------------------------------- 3357 */ 3358 #ifndef QCN7605_SUPPORT 3359 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3360 { 3361 u32 addr = 0; 3362 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3363 3364 switch (ce) { 3365 case 0: 3366 addr = SHADOW_VALUE0; 3367 break; 3368 case 3: 3369 addr = SHADOW_VALUE3; 3370 break; 3371 case 4: 3372 addr = SHADOW_VALUE4; 3373 break; 3374 case 5: 3375 addr = SHADOW_VALUE5; 3376 break; 3377 case 7: 3378 addr = SHADOW_VALUE7; 3379 break; 3380 default: 3381 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3382 QDF_ASSERT(0); 3383 } 3384 return addr; 3385 3386 } 3387 3388 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3389 { 3390 u32 addr = 0; 3391 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3392 3393 switch (ce) { 3394 case 1: 3395 addr = SHADOW_VALUE13; 3396 break; 3397 case 2: 3398 addr = SHADOW_VALUE14; 3399 break; 3400 case 5: 3401 addr = SHADOW_VALUE17; 3402 break; 3403 case 7: 3404 addr = 
SHADOW_VALUE19; 3405 break; 3406 case 8: 3407 addr = SHADOW_VALUE20; 3408 break; 3409 case 9: 3410 addr = SHADOW_VALUE21; 3411 break; 3412 case 10: 3413 addr = SHADOW_VALUE22; 3414 break; 3415 case 11: 3416 addr = SHADOW_VALUE23; 3417 break; 3418 default: 3419 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3420 QDF_ASSERT(0); 3421 } 3422 3423 return addr; 3424 3425 } 3426 #else 3427 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3428 { 3429 u32 addr = 0; 3430 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3431 3432 switch (ce) { 3433 case 0: 3434 addr = SHADOW_VALUE0; 3435 break; 3436 case 4: 3437 addr = SHADOW_VALUE4; 3438 break; 3439 case 5: 3440 addr = SHADOW_VALUE5; 3441 break; 3442 default: 3443 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3444 QDF_ASSERT(0); 3445 } 3446 return addr; 3447 } 3448 3449 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3450 { 3451 u32 addr = 0; 3452 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3453 3454 switch (ce) { 3455 case 1: 3456 addr = SHADOW_VALUE13; 3457 break; 3458 case 2: 3459 addr = SHADOW_VALUE14; 3460 break; 3461 case 3: 3462 addr = SHADOW_VALUE15; 3463 break; 3464 case 5: 3465 addr = SHADOW_VALUE17; 3466 break; 3467 case 7: 3468 addr = SHADOW_VALUE19; 3469 break; 3470 case 8: 3471 addr = SHADOW_VALUE20; 3472 break; 3473 case 9: 3474 addr = SHADOW_VALUE21; 3475 break; 3476 case 10: 3477 addr = SHADOW_VALUE22; 3478 break; 3479 case 11: 3480 addr = SHADOW_VALUE23; 3481 break; 3482 default: 3483 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3484 QDF_ASSERT(0); 3485 } 3486 3487 return addr; 3488 } 3489 #endif 3490 #endif 3491 3492 #if defined(FEATURE_LRO) 3493 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 3494 { 3495 struct CE_state *ce_state; 3496 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3497 3498 ce_state = scn->ce_id_to_state[ctx_id]; 3499 3500 return ce_state->lro_data; 3501 } 3502 #endif 3503 3504 /** 3505 * hif_map_service_to_pipe() - returns the ce ids pertaining to 3506 * this service 3507 * @scn: hif_softc pointer. 3508 * @svc_id: Service ID for which the mapping is needed. 3509 * @ul_pipe: address of the container in which ul pipe is returned. 3510 * @dl_pipe: address of the container in which dl pipe is returned. 3511 * @ul_is_polled: address of the container in which a bool 3512 * indicating if the UL CE for this service 3513 * is polled is returned. 3514 * @dl_is_polled: address of the container in which a bool 3515 * indicating if the DL CE for this service 3516 * is polled is returned. 3517 * 3518 * Return: Indicates whether the service has been found in the table. 3519 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 3520 * There will be warning logs if either leg has not been updated 3521 * because it missed the entry in the table (but this is not an err). 
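 * A service can match several table entries; the last matching entry
 * wins for each direction, since the loop scans the whole table.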
3522 */ 3523 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 3524 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 3525 int *dl_is_polled) 3526 { 3527 int status = QDF_STATUS_E_INVAL; 3528 unsigned int i; 3529 struct service_to_pipe element; 3530 struct service_to_pipe *tgt_svc_map_to_use; 3531 uint32_t sz_tgt_svc_map_to_use; 3532 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3533 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3534 bool dl_updated = false; 3535 bool ul_updated = false; 3536 3537 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 3538 &sz_tgt_svc_map_to_use); 3539 3540 *dl_is_polled = 0; /* polling for received messages not supported */ 3541 3542 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 3543 3544 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 3545 if (element.service_id == svc_id) { 3546 if (element.pipedir == PIPEDIR_OUT) { 3547 *ul_pipe = element.pipenum; 3548 *ul_is_polled = 3549 (hif_state->host_ce_config[*ul_pipe].flags & 3550 CE_ATTR_DISABLE_INTR) != 0; 3551 ul_updated = true; 3552 } else if (element.pipedir == PIPEDIR_IN) { 3553 *dl_pipe = element.pipenum; 3554 dl_updated = true; 3555 } 3556 status = QDF_STATUS_SUCCESS; 3557 } 3558 } 3559 if (ul_updated == false) 3560 HIF_DBG("ul pipe is NOT updated for service %d", svc_id); 3561 if (dl_updated == false) 3562 HIF_DBG("dl pipe is NOT updated for service %d", svc_id); 3563 3564 return status; 3565 } 3566 3567 #ifdef SHADOW_REG_DEBUG 3568 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 3569 uint32_t CE_ctrl_addr) 3570 { 3571 uint32_t read_from_hw, srri_from_ddr = 0; 3572 3573 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 3574 3575 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3576 3577 if (read_from_hw != srri_from_ddr) { 3578 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3579 __func__, srri_from_ddr, read_from_hw, 3580 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3581 QDF_ASSERT(0); 3582 } 3583 return srri_from_ddr; 3584 } 3585 3586 3587 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 3588 uint32_t CE_ctrl_addr) 3589 { 3590 uint32_t read_from_hw, drri_from_ddr = 0; 3591 3592 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 3593 3594 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3595 3596 if (read_from_hw != drri_from_ddr) { 3597 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3598 drri_from_ddr, read_from_hw, 3599 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3600 QDF_ASSERT(0); 3601 } 3602 return drri_from_ddr; 3603 } 3604 3605 #endif 3606 3607 #ifdef ADRASTEA_RRI_ON_DDR 3608 /** 3609 * hif_get_src_ring_read_index(): Called to get the SRRI 3610 * 3611 * @scn: hif_softc pointer 3612 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 3613 * 3614 * This function returns the SRRI to the caller. 
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					     (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
								 CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
						uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					     (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
								  CE_ctrl_addr);
	}
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;
	qdf_dma_addr_t paddr_rri_on_ddr = 0;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	if (!scn->vaddr_rri_on_ddr) {
		HIF_DBG("dmaable page alloc fail");
		return;
	}

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
	low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
}
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_opaque_softc pointer.
3731 * 3732 * Output the copy engine registers 3733 * 3734 * Return: 0 for success or error code 3735 */ 3736 int hif_dump_ce_registers(struct hif_softc *scn) 3737 { 3738 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3739 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 3740 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 3741 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 3742 uint16_t i; 3743 QDF_STATUS status; 3744 3745 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 3746 if (scn->ce_id_to_state[i] == NULL) { 3747 HIF_DBG("CE%d not used.", i); 3748 continue; 3749 } 3750 3751 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 3752 (uint8_t *) &ce_reg_values[0], 3753 ce_reg_word_size * sizeof(uint32_t)); 3754 3755 if (status != QDF_STATUS_SUCCESS) { 3756 HIF_ERROR("Dumping CE register failed!"); 3757 return -EACCES; 3758 } 3759 HIF_ERROR("CE%d=>\n", i); 3760 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 3761 (uint8_t *) &ce_reg_values[0], 3762 ce_reg_word_size * sizeof(uint32_t)); 3763 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 3764 + SR_WR_INDEX_ADDRESS), 3765 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 3766 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 3767 + CURRENT_SRRI_ADDRESS), 3768 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 3769 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 3770 + DST_WR_INDEX_ADDRESS), 3771 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 3772 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 3773 + CURRENT_DRRI_ADDRESS), 3774 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 3775 qdf_print("---"); 3776 } 3777 return 0; 3778 } 3779 qdf_export_symbol(hif_dump_ce_registers); 3780 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 3781 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 3782 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 3783 { 3784 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3785 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3786 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 3787 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3788 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3789 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 3790 struct CE_ring_state *src_ring = ce_state->src_ring; 3791 struct CE_ring_state *dest_ring = ce_state->dest_ring; 3792 3793 if (src_ring) { 3794 hif_info->ul_pipe.nentries = src_ring->nentries; 3795 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 3796 hif_info->ul_pipe.sw_index = src_ring->sw_index; 3797 hif_info->ul_pipe.write_index = src_ring->write_index; 3798 hif_info->ul_pipe.hw_index = src_ring->hw_index; 3799 hif_info->ul_pipe.base_addr_CE_space = 3800 src_ring->base_addr_CE_space; 3801 hif_info->ul_pipe.base_addr_owner_space = 3802 src_ring->base_addr_owner_space; 3803 } 3804 3805 3806 if (dest_ring) { 3807 hif_info->dl_pipe.nentries = dest_ring->nentries; 3808 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; 3809 hif_info->dl_pipe.sw_index = dest_ring->sw_index; 3810 hif_info->dl_pipe.write_index = dest_ring->write_index; 3811 hif_info->dl_pipe.hw_index = dest_ring->hw_index; 3812 hif_info->dl_pipe.base_addr_CE_space = 3813 dest_ring->base_addr_CE_space; 3814 hif_info->dl_pipe.base_addr_owner_space = 3815 dest_ring->base_addr_owner_space; 3816 } 3817 3818 hif_info->pci_mem = pci_resource_start(sc->pdev, 0); 3819 hif_info->ctrl_addr = ce_state->ctrl_addr; 3820 3821 return hif_info; 3822 } 3823 qdf_export_symbol(hif_get_addl_pipe_info); 3824 3825 uint32_t 
uint32_t
hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->nss_wifi_ol_mode = mode;
	return 0;
}
qdf_export_symbol(hif_set_nss_wifiol_mode);
#endif

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
}

/* disable interrupts (only applicable for legacy copy engines currently) */
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
	uint32_t ctrl_addr = CE_state->ctrl_addr;

	Q_TARGET_ACCESS_BEGIN(scn);
	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
qdf_export_symbol(hif_disable_interrupt);

/**
 * hif_fw_event_handler() - hif fw event handler
 * @hif_state: pointer to hif ce state structure
 *
 * Process fw events and raise HTC callback to process fw events.
 *
 * Return: none
 */
static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
{
	struct hif_msg_callbacks *msg_callbacks =
		&hif_state->msg_callbacks_current;

	if (!msg_callbacks->fwEventHandler)
		return;

	msg_callbacks->fwEventHandler(msg_callbacks->Context,
			QDF_STATUS_E_FAILURE);
}
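/*
 * Callback wiring, for orientation (a minimal sketch; my_fw_event_cb
 * and my_ctx are hypothetical, and the hif_post_init() registration
 * path is assumed from the HIF CE layer): HTC hands its callback table
 * to HIF during setup, which is how fwEventHandler above gets
 * populated:
 *
 *	struct hif_msg_callbacks cbs = { 0 };
 *
 *	cbs.fwEventHandler = my_fw_event_cb;
 *	cbs.Context = my_ctx;
 *	hif_post_init(hif_hdl, NULL, &cbs);
 *
 * hif_fw_event_handler() then forwards QDF_STATUS_E_FAILURE to that
 * callback so HTC can kick off firmware-recovery processing.
 */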
#ifndef QCA_WIFI_3_0
/**
 * hif_fw_interrupt_handler() - FW interrupt handler
 * @irq: irq number
 * @arg: the user pointer
 *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Only registered for legacy CE devices.
 *
 * Return: status of handled irq
 */
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	struct hif_softc *scn = arg;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	uint32_t fw_indicator_address, fw_indicator;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;

	fw_indicator_address = hif_state->fw_indicator_address;
	/* For sudden unplug this will return ~0 */
	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);

	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
		/* ACK: clear Target-side pending event */
		A_TARGET_WRITE(scn, fw_indicator_address,
			       fw_indicator & ~FW_IND_EVENT_PENDING);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;

		if (hif_state->started) {
			hif_fw_event_handler(hif_state);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 * fw_indicator is used as a bitmap, defined as:
			 *     FW_IND_EVENT_PENDING 0x1
			 *     FW_IND_INITIALIZED   0x2
			 *     FW_IND_NEEDRECOVER   0x4
			 */
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
				("%s: Early firmware event indicated 0x%x\n",
				 __func__, fw_indicator));
		}
	} else {
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return ATH_ISR_SCHED;
	}

	return ATH_ISR_SCHED;
}
#else
irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
{
	return ATH_ISR_SCHED;
}
#endif /* #ifndef QCA_WIFI_3_0 */

/**
 * hif_wlan_disable(): call the platform driver to disable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode to the platform driver to disable
 * wlan.
 *
 * Return: void
 */
void hif_wlan_disable(struct hif_softc *scn)
{
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	if (scn->target_status == TARGET_STATUS_RESET)
		return;

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	pld_wlan_disable(scn->qdf_dev->dev, mode);
}

int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
{
	QDF_STATUS status;
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 HTC_CTRL_RSVD_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
		return qdf_status_to_os_return(status);
	}

	*ce_id = dl_pipe;

	return 0;
}
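/*
 * Illustrative caller (hypothetical; not part of this file): the bus
 * layer can use the wake CE id to decide which copy-engine interrupt
 * to arm as a wakeup source across suspend/resume:
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		HIF_INFO("wake events arrive on CE%u", wake_ce_id);
 *
 * This relies on the convention encoded above: the downlink pipe of
 * HTC_CTRL_RSVD_SVC is routed to the CE on which the firmware signals
 * wake events.
 */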