1 /* 2 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "targcfg.h" 20 #include "qdf_lock.h" 21 #include "qdf_status.h" 22 #include "qdf_status.h" 23 #include <qdf_atomic.h> /* qdf_atomic_read */ 24 #include <targaddrs.h> 25 #include "hif_io32.h" 26 #include <hif.h> 27 #include <target_type.h> 28 #include "regtable.h" 29 #define ATH_MODULE_NAME hif 30 #include <a_debug.h> 31 #include "hif_main.h" 32 #include "ce_api.h" 33 #include "qdf_trace.h" 34 #include "pld_common.h" 35 #include "hif_debug.h" 36 #include "ce_internal.h" 37 #include "ce_reg.h" 38 #include "ce_assignment.h" 39 #include "ce_tasklet.h" 40 #ifndef CONFIG_WIN 41 #include "qwlan_version.h" 42 #endif 43 #include "qdf_module.h" 44 45 #define CE_POLL_TIMEOUT 10 /* ms */ 46 47 #define AGC_DUMP 1 48 #define CHANINFO_DUMP 2 49 #define BB_WATCHDOG_DUMP 3 50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 51 #define PCIE_ACCESS_DUMP 4 52 #endif 53 #include "mp_dev.h" 54 55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \ 56 !defined(QCA_WIFI_SUPPORT_SRNG) 57 #define QCA_WIFI_SUPPORT_SRNG 58 #endif 59 60 /* Forward references */ 61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info); 62 63 /* 64 
* Fix EV118783, poll to check whether a BMI response comes 65 * other than waiting for the interruption which may be lost. 66 */ 67 /* #define BMI_RSP_POLLING */ 68 #define BMI_RSP_TO_MILLISEC 1000 69 70 #ifdef CONFIG_BYPASS_QMI 71 #define BYPASS_QMI 1 72 #else 73 #define BYPASS_QMI 0 74 #endif 75 76 #ifdef CONFIG_WIN 77 #if ENABLE_10_4_FW_HDR 78 #define WDI_IPA_SERVICE_GROUP 5 79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0) 80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1) 81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2) 82 #endif /* ENABLE_10_4_FW_HDR */ 83 #endif 84 85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn); 86 static void hif_config_rri_on_ddr(struct hif_softc *scn); 87 88 /** 89 * hif_target_access_log_dump() - dump access log 90 * 91 * dump access log 92 * 93 * Return: n/a 94 */ 95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 96 static void hif_target_access_log_dump(void) 97 { 98 hif_target_dump_access_log(); 99 } 100 #endif 101 102 103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, 104 uint8_t cmd_id, bool start) 105 { 106 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 107 108 switch (cmd_id) { 109 case AGC_DUMP: 110 if (start) 111 priv_start_agc(scn); 112 else 113 priv_dump_agc(scn); 114 break; 115 case CHANINFO_DUMP: 116 if (start) 117 priv_start_cap_chaninfo(scn); 118 else 119 priv_dump_chaninfo(scn); 120 break; 121 case BB_WATCHDOG_DUMP: 122 priv_dump_bbwatchdog(scn); 123 break; 124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 125 case PCIE_ACCESS_DUMP: 126 hif_target_access_log_dump(); 127 break; 128 #endif 129 default: 130 HIF_ERROR("%s: Invalid htc dump command", __func__); 131 break; 132 } 133 } 134 135 static void ce_poll_timeout(void *arg) 136 { 137 struct CE_state *CE_state = (struct CE_state *)arg; 138 139 if (CE_state->timer_inited) { 140 ce_per_engine_service(CE_state->scn, CE_state->id); 141 qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT); 142 } 143 } 144 145 static 
unsigned int roundup_pwr2(unsigned int n) 146 { 147 int i; 148 unsigned int test_pwr2; 149 150 if (!(n & (n - 1))) 151 return n; /* already a power of 2 */ 152 153 test_pwr2 = 4; 154 for (i = 0; i < 29; i++) { 155 if (test_pwr2 > n) 156 return test_pwr2; 157 test_pwr2 = test_pwr2 << 1; 158 } 159 160 QDF_ASSERT(0); /* n too large */ 161 return 0; 162 } 163 164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C 165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40 166 167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = { 168 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 169 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, 170 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 171 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, 172 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, 173 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 174 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 175 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 176 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 177 #ifdef QCA_WIFI_3_0_ADRASTEA 178 { 9, ADRASTEA_DST_WR_INDEX_OFFSET}, 179 { 10, ADRASTEA_DST_WR_INDEX_OFFSET}, 180 { 11, ADRASTEA_DST_WR_INDEX_OFFSET}, 181 #endif 182 }; 183 184 #ifdef QCN7605_SUPPORT 185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = { 186 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 187 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 188 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, 189 { 3, ADRASTEA_DST_WR_INDEX_OFFSET}, 190 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 191 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 192 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 193 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 194 }; 195 #endif 196 197 #ifdef WLAN_FEATURE_EPPING 198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = { 199 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 200 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, 201 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 202 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, 203 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 204 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 205 { 5, ADRASTEA_DST_WR_INDEX_OFFSET}, 206 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 207 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 208 }; 209 #endif 210 211 /* CE_PCI TABLE 
*/ 212 /* 213 * NOTE: the table below is out of date, though still a useful reference. 214 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual 215 * mapping of HTC services to HIF pipes. 216 */ 217 /* 218 * This authoritative table defines Copy Engine configuration and the mapping 219 * of services/endpoints to CEs. A subset of this information is passed to 220 * the Target during startup as a prerequisite to entering BMI phase. 221 * See: 222 * target_service_to_ce_map - Target-side mapping 223 * hif_map_service_to_pipe - Host-side mapping 224 * target_ce_config - Target-side configuration 225 * host_ce_config - Host-side configuration 226 ============================================================================ 227 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer 228 | | | ctio | Size | Frequency 229 | | | n | | 230 ============================================================================ 231 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent 232 descriptor | | | | O(100B) | and regular 233 download | | | | | 234 ---------------------------------------------------------------------------- 235 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and 236 indication | | | | O(10B) | regular 237 upload | | | | | 238 ---------------------------------------------------------------------------- 239 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare 240 upload | | | | O(1000B) | (frequent 241 e.g. noise | | | | | during IP1.0 242 packets | | | | | testing) 243 ---------------------------------------------------------------------------- 244 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare 245 download | | | | O(1000B) | (frequent 246 e.g. | | | | | during IP1.0 247 misdirecte | | | | | testing) 248 d EAPOL | | | | | 249 packets | | | | | 250 ---------------------------------------------------------------------------- 251 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?) 
252 | DATA_VO (uplink) | | | | 253 ---------------------------------------------------------------------------- 254 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?) 255 | DATA_VO (downlink) | | | | 256 ---------------------------------------------------------------------------- 257 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent 258 | | | | O(100B) | 259 ---------------------------------------------------------------------------- 260 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent 261 messages | (downlink) | | | O(100B) | 262 | | | | | 263 ---------------------------------------------------------------------------- 264 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?) 265 | HTC_RAW_STREAMS | | | | 266 | (uplink) | | | | 267 ---------------------------------------------------------------------------- 268 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?) 269 | HTC_RAW_STREAMS | | | | 270 | (downlink) | | | | 271 ---------------------------------------------------------------------------- 272 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window 273 | | | | | infrequent 274 ============================================================================ 275 */ 276 277 /* 278 * Map from service/endpoint to Copy Engine. 279 * This table is derived from the CE_PCI TABLE, above. 280 * It is passed to the Target at startup for use by firmware. 
281 */ 282 static struct service_to_pipe target_service_to_ce_map_wlan[] = { 283 { 284 WMI_DATA_VO_SVC, 285 PIPEDIR_OUT, /* out = UL = host -> target */ 286 3, 287 }, 288 { 289 WMI_DATA_VO_SVC, 290 PIPEDIR_IN, /* in = DL = target -> host */ 291 2, 292 }, 293 { 294 WMI_DATA_BK_SVC, 295 PIPEDIR_OUT, /* out = UL = host -> target */ 296 3, 297 }, 298 { 299 WMI_DATA_BK_SVC, 300 PIPEDIR_IN, /* in = DL = target -> host */ 301 2, 302 }, 303 { 304 WMI_DATA_BE_SVC, 305 PIPEDIR_OUT, /* out = UL = host -> target */ 306 3, 307 }, 308 { 309 WMI_DATA_BE_SVC, 310 PIPEDIR_IN, /* in = DL = target -> host */ 311 2, 312 }, 313 { 314 WMI_DATA_VI_SVC, 315 PIPEDIR_OUT, /* out = UL = host -> target */ 316 3, 317 }, 318 { 319 WMI_DATA_VI_SVC, 320 PIPEDIR_IN, /* in = DL = target -> host */ 321 2, 322 }, 323 { 324 WMI_CONTROL_SVC, 325 PIPEDIR_OUT, /* out = UL = host -> target */ 326 3, 327 }, 328 { 329 WMI_CONTROL_SVC, 330 PIPEDIR_IN, /* in = DL = target -> host */ 331 2, 332 }, 333 { 334 HTC_CTRL_RSVD_SVC, 335 PIPEDIR_OUT, /* out = UL = host -> target */ 336 0, /* could be moved to 3 (share with WMI) */ 337 }, 338 { 339 HTC_CTRL_RSVD_SVC, 340 PIPEDIR_IN, /* in = DL = target -> host */ 341 2, 342 }, 343 { 344 HTC_RAW_STREAMS_SVC, /* not currently used */ 345 PIPEDIR_OUT, /* out = UL = host -> target */ 346 0, 347 }, 348 { 349 HTC_RAW_STREAMS_SVC, /* not currently used */ 350 PIPEDIR_IN, /* in = DL = target -> host */ 351 2, 352 }, 353 { 354 HTT_DATA_MSG_SVC, 355 PIPEDIR_OUT, /* out = UL = host -> target */ 356 4, 357 }, 358 { 359 HTT_DATA_MSG_SVC, 360 PIPEDIR_IN, /* in = DL = target -> host */ 361 1, 362 }, 363 { 364 WDI_IPA_TX_SVC, 365 PIPEDIR_OUT, /* in = DL = target -> host */ 366 5, 367 }, 368 #if defined(QCA_WIFI_3_0_ADRASTEA) 369 { 370 HTT_DATA2_MSG_SVC, 371 PIPEDIR_IN, /* in = DL = target -> host */ 372 9, 373 }, 374 { 375 HTT_DATA3_MSG_SVC, 376 PIPEDIR_IN, /* in = DL = target -> host */ 377 10, 378 }, 379 { 380 PACKET_LOG_SVC, 381 PIPEDIR_IN, /* in = DL = target -> host */ 382 11, 
383 }, 384 #endif 385 /* (Additions here) */ 386 387 { /* Must be last */ 388 0, 389 0, 390 0, 391 }, 392 }; 393 394 /* PIPEDIR_OUT = HOST to Target */ 395 /* PIPEDIR_IN = TARGET to HOST */ 396 #if (defined(QCA_WIFI_QCA8074)) 397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = { 398 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 399 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 400 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 401 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 402 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 403 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 404 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 405 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 406 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 407 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 408 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 409 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 410 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 411 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 412 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 413 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 414 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 415 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 416 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 417 /* (Additions here) */ 418 { 0, 0, 0, }, 419 }; 420 #else 421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = { 422 }; 423 #endif 424 425 #if (defined(QCA_WIFI_QCA8074V2)) 426 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { 427 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 428 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 429 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 430 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 431 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 432 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 433 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 434 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 435 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 436 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 437 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 438 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 439 { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9}, 440 { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2}, 441 { HTC_CTRL_RSVD_SVC, 
PIPEDIR_OUT, 0, }, 442 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 443 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 444 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 445 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 446 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 447 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 448 /* (Additions here) */ 449 { 0, 0, 0, }, 450 }; 451 #else 452 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { 453 }; 454 #endif 455 456 #if (defined(QCA_WIFI_QCA6018)) 457 static struct service_to_pipe target_service_to_ce_map_qca6018[] = { 458 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 459 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 460 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 461 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 462 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 463 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 464 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 465 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 466 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 467 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 468 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 469 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 470 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 471 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 472 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 473 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 474 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 475 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 476 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 477 /* (Additions here) */ 478 { 0, 0, 0, }, 479 }; 480 #else 481 static struct service_to_pipe target_service_to_ce_map_qca6018[] = { 482 }; 483 #endif 484 485 /* PIPEDIR_OUT = HOST to Target */ 486 /* PIPEDIR_IN = TARGET to HOST */ 487 #ifdef QCN7605_SUPPORT 488 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = { 489 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, }, 490 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 491 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, }, 492 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 493 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, }, 494 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 495 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, }, 496 { WMI_DATA_VI_SVC, 
PIPEDIR_IN, 2, }, 497 { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, }, 498 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 499 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 500 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 501 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, }, 502 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, }, 503 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 504 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 505 { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, }, 506 #ifdef IPA_OFFLOAD 507 { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, }, 508 #else 509 { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, }, 510 #endif 511 { PACKET_LOG_SVC, PIPEDIR_IN, 7, }, 512 /* (Additions here) */ 513 { 0, 0, 0, }, 514 }; 515 #endif 516 517 #if (defined(QCA_WIFI_QCA6290)) 518 #ifdef CONFIG_WIN 519 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 520 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 521 { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, }, 522 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 523 { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, }, 524 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 525 { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, }, 526 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 527 { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, }, 528 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 529 { WMI_CONTROL_SVC, PIPEDIR_IN , 2, }, 530 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 531 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, }, 532 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 533 { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, }, 534 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 535 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 536 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 537 /* (Additions here) */ 538 { 0, 0, 0, }, 539 }; 540 #else 541 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 542 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 543 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 544 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 545 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 546 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 547 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 548 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 549 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 550 { WMI_CONTROL_SVC, 
PIPEDIR_OUT, 3, }, 551 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 552 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 553 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 554 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 555 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 556 /* (Additions here) */ 557 { 0, 0, 0, }, 558 }; 559 #endif 560 #else 561 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 562 }; 563 #endif 564 565 #if (defined(QCA_WIFI_QCA6390)) 566 static struct service_to_pipe target_service_to_ce_map_qca6390[] = { 567 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 568 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 569 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 570 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 571 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 572 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 573 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 574 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 575 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 576 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 577 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 578 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 579 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 580 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 581 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 582 /* (Additions here) */ 583 { 0, 0, 0, }, 584 }; 585 #else 586 static struct service_to_pipe target_service_to_ce_map_qca6390[] = { 587 }; 588 #endif 589 590 static struct service_to_pipe target_service_to_ce_map_ar900b[] = { 591 { 592 WMI_DATA_VO_SVC, 593 PIPEDIR_OUT, /* out = UL = host -> target */ 594 3, 595 }, 596 { 597 WMI_DATA_VO_SVC, 598 PIPEDIR_IN, /* in = DL = target -> host */ 599 2, 600 }, 601 { 602 WMI_DATA_BK_SVC, 603 PIPEDIR_OUT, /* out = UL = host -> target */ 604 3, 605 }, 606 { 607 WMI_DATA_BK_SVC, 608 PIPEDIR_IN, /* in = DL = target -> host */ 609 2, 610 }, 611 { 612 WMI_DATA_BE_SVC, 613 PIPEDIR_OUT, /* out = UL = host -> target */ 614 3, 615 }, 616 { 617 WMI_DATA_BE_SVC, 618 PIPEDIR_IN, /* in = DL = target -> host */ 619 2, 620 }, 621 { 622 WMI_DATA_VI_SVC, 623 PIPEDIR_OUT, /* out = UL = host -> target */ 624 3, 625 }, 626 { 627 
WMI_DATA_VI_SVC, 628 PIPEDIR_IN, /* in = DL = target -> host */ 629 2, 630 }, 631 { 632 WMI_CONTROL_SVC, 633 PIPEDIR_OUT, /* out = UL = host -> target */ 634 3, 635 }, 636 { 637 WMI_CONTROL_SVC, 638 PIPEDIR_IN, /* in = DL = target -> host */ 639 2, 640 }, 641 { 642 HTC_CTRL_RSVD_SVC, 643 PIPEDIR_OUT, /* out = UL = host -> target */ 644 0, /* could be moved to 3 (share with WMI) */ 645 }, 646 { 647 HTC_CTRL_RSVD_SVC, 648 PIPEDIR_IN, /* in = DL = target -> host */ 649 1, 650 }, 651 { 652 HTC_RAW_STREAMS_SVC, /* not currently used */ 653 PIPEDIR_OUT, /* out = UL = host -> target */ 654 0, 655 }, 656 { 657 HTC_RAW_STREAMS_SVC, /* not currently used */ 658 PIPEDIR_IN, /* in = DL = target -> host */ 659 1, 660 }, 661 { 662 HTT_DATA_MSG_SVC, 663 PIPEDIR_OUT, /* out = UL = host -> target */ 664 4, 665 }, 666 #ifdef WLAN_FEATURE_FASTPATH 667 { 668 HTT_DATA_MSG_SVC, 669 PIPEDIR_IN, /* in = DL = target -> host */ 670 5, 671 }, 672 #else /* WLAN_FEATURE_FASTPATH */ 673 { 674 HTT_DATA_MSG_SVC, 675 PIPEDIR_IN, /* in = DL = target -> host */ 676 1, 677 }, 678 #endif /* WLAN_FEATURE_FASTPATH */ 679 680 /* (Additions here) */ 681 682 { /* Must be last */ 683 0, 684 0, 685 0, 686 }, 687 }; 688 689 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map; 690 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map); 691 692 #ifdef WLAN_FEATURE_EPPING 693 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = { 694 {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 695 {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 696 {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ 697 {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ 698 {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 699 {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 700 {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 701 {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* 
in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},	/* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,},	/* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},	/* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},	/* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},	/* in = DL = target -> host */
	{0, 0, 0,},		/* Must be last */
};

/**
 * hif_select_epping_service_to_pipe_map() - return the epping service map
 * @tgt_svc_map_to_use: [out] set to the epping service-to-pipe table
 * @sz_tgt_svc_map_to_use: [out] set to the size of that table in bytes
 *
 * Return: none
 */
void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
/* QCN7605: hand back the QCN7605-specific service-to-pipe table */
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
/*
 * Stub used when QCN7605 support is compiled out.
 * NOTE(review): this leaves both output parameters untouched, so a caller
 * reaching the TARGET_TYPE_QCN7605 case in a non-QCN7605 build would use
 * uninitialized map/size values — confirm this path is unreachable there.
 */
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

/**
 * hif_select_service_to_pipe_map() - select the service-to-pipe table
 * @scn: hif context
 * @tgt_svc_map_to_use: [out] table matching the current mode/target type
 * @sz_tgt_svc_map_to_use: [out] size of that table in bytes
 *
 * Epping mode takes precedence over the target type; otherwise the table
 * is chosen from target_info->target_type, with the generic wlan map as
 * the default for unlisted targets.
 *
 * Return: none
 */
static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				   struct service_to_pipe **tgt_svc_map_to_use,
				   uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		}
	}
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 * Sets htt_rx_data attribute of the state structure if the
 * CE serves one of the HTT DATA services.
 *
 * Return:
 *  false (no HTT DATA service found for this CE; attributes untouched)
 *  true  (htt_rx_data or htt_tx_data was set)
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state != NULL) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		/* map_sz is in bytes; convert to an entry count */
		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
			  ce_id, type, ring->sw_index, ring->write_index);
	/* a non-empty ring at init time is fatal */
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries should be allocated
 * @desc_size: ce desc size
 *
 * The CE assigned to IPA uC (HIF_PCI_IPA_UC_ASSIGNED_CE) on non-SRNG
 * targets is backed by shared memory so IPA can access it; the existing
 * shared ring is reused if already allocated. All other CEs use normal
 * DMA-coherent memory.
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			/* extra CE_DESC_RING_ALIGN allows aligning the base */
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						&scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 *
@scn: softc instance 915 * @ce_id: ce in question 916 * @ce_ring: copyengine instance 917 * @desc_size: ce desc size 918 * 919 * Return: None 920 */ 921 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, 922 struct CE_ring_state *ce_ring, uint32_t desc_size) 923 { 924 if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) && 925 !ce_srng_based(scn)) { 926 if (scn->ipa_ce_ring) { 927 qdf_mem_shared_mem_free(scn->qdf_dev, 928 scn->ipa_ce_ring); 929 scn->ipa_ce_ring = NULL; 930 } 931 ce_ring->base_addr_owner_space_unaligned = NULL; 932 } else { 933 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 934 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, 935 ce_ring->base_addr_owner_space_unaligned, 936 ce_ring->base_addr_CE_space, 0); 937 ce_ring->base_addr_owner_space_unaligned = NULL; 938 } 939 } 940 #else 941 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, 942 qdf_dma_addr_t *base_addr, 943 struct CE_ring_state *ce_ring, 944 unsigned int nentries, uint32_t desc_size) 945 { 946 ce_ring->base_addr_owner_space_unaligned = 947 qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 948 (nentries * desc_size + 949 CE_DESC_RING_ALIGN), base_addr); 950 if (!ce_ring->base_addr_owner_space_unaligned) { 951 HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", 952 __func__, CE_id); 953 return QDF_STATUS_E_NOMEM; 954 } 955 return QDF_STATUS_SUCCESS; 956 } 957 958 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, 959 struct CE_ring_state *ce_ring, uint32_t desc_size) 960 { 961 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 962 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, 963 ce_ring->base_addr_owner_space_unaligned, 964 ce_ring->base_addr_CE_space, 0); 965 ce_ring->base_addr_owner_space_unaligned = NULL; 966 } 967 #endif /* IPA_OFFLOAD */ 968 969 /* 970 * TODO: Need to explore the possibility of having this as part of a 971 * target context instead of a global array. 
972 */ 973 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void); 974 975 void ce_service_register_module(enum ce_target_type target_type, 976 struct ce_ops* (*ce_attach)(void)) 977 { 978 if (target_type < CE_MAX_TARGET_TYPE) 979 ce_attach_register[target_type] = ce_attach; 980 } 981 982 qdf_export_symbol(ce_service_register_module); 983 984 /** 985 * ce_srng_based() - Does this target use srng 986 * @ce_state : pointer to the state context of the CE 987 * 988 * Description: 989 * returns true if the target is SRNG based 990 * 991 * Return: 992 * false (attribute set to false) 993 * true (attribute set to true); 994 */ 995 bool ce_srng_based(struct hif_softc *scn) 996 { 997 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 998 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 999 1000 switch (tgt_info->target_type) { 1001 case TARGET_TYPE_QCA8074: 1002 case TARGET_TYPE_QCA8074V2: 1003 case TARGET_TYPE_QCA6290: 1004 case TARGET_TYPE_QCA6390: 1005 case TARGET_TYPE_QCA6018: 1006 return true; 1007 default: 1008 return false; 1009 } 1010 return false; 1011 } 1012 qdf_export_symbol(ce_srng_based); 1013 1014 #ifdef QCA_WIFI_SUPPORT_SRNG 1015 static struct ce_ops *ce_services_attach(struct hif_softc *scn) 1016 { 1017 struct ce_ops *ops = NULL; 1018 1019 if (ce_srng_based(scn)) { 1020 if (ce_attach_register[CE_SVC_SRNG]) 1021 ops = ce_attach_register[CE_SVC_SRNG](); 1022 } else if (ce_attach_register[CE_SVC_LEGACY]) { 1023 ops = ce_attach_register[CE_SVC_LEGACY](); 1024 } 1025 1026 return ops; 1027 } 1028 1029 1030 #else /* QCA_LITHIUM */ 1031 static struct ce_ops *ce_services_attach(struct hif_softc *scn) 1032 { 1033 if (ce_attach_register[CE_SVC_LEGACY]) 1034 return ce_attach_register[CE_SVC_LEGACY](); 1035 1036 return NULL; 1037 } 1038 #endif /* QCA_LITHIUM */ 1039 1040 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn, 1041 struct pld_shadow_reg_v2_cfg **shadow_config, 1042 int 
*num_shadow_registers_configured) { 1043 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1044 1045 return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg( 1046 scn, shadow_config, num_shadow_registers_configured); 1047 } 1048 1049 static inline uint32_t ce_get_desc_size(struct hif_softc *scn, 1050 uint8_t ring_type) 1051 { 1052 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1053 1054 return hif_state->ce_services->ce_get_desc_size(ring_type); 1055 } 1056 1057 1058 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state, 1059 uint8_t ring_type, uint32_t nentries) 1060 { 1061 uint32_t ce_nbytes; 1062 char *ptr; 1063 qdf_dma_addr_t base_addr; 1064 struct CE_ring_state *ce_ring; 1065 uint32_t desc_size; 1066 struct hif_softc *scn = CE_state->scn; 1067 1068 ce_nbytes = sizeof(struct CE_ring_state) 1069 + (nentries * sizeof(void *)); 1070 ptr = qdf_mem_malloc(ce_nbytes); 1071 if (!ptr) 1072 return NULL; 1073 1074 ce_ring = (struct CE_ring_state *)ptr; 1075 ptr += sizeof(struct CE_ring_state); 1076 ce_ring->nentries = nentries; 1077 ce_ring->nentries_mask = nentries - 1; 1078 1079 ce_ring->low_water_mark_nentries = 0; 1080 ce_ring->high_water_mark_nentries = nentries; 1081 ce_ring->per_transfer_context = (void **)ptr; 1082 1083 desc_size = ce_get_desc_size(scn, ring_type); 1084 1085 /* Legacy platforms that do not support cache 1086 * coherent DMA are unsupported 1087 */ 1088 if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr, 1089 ce_ring, nentries, 1090 desc_size) != 1091 QDF_STATUS_SUCCESS) { 1092 HIF_ERROR("%s: ring has no DMA mem", 1093 __func__); 1094 qdf_mem_free(ce_ring); 1095 return NULL; 1096 } 1097 ce_ring->base_addr_CE_space_unaligned = base_addr; 1098 1099 /* Correctly initialize memory to 0 to 1100 * prevent garbage data crashing system 1101 * when download firmware 1102 */ 1103 qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned, 1104 nentries * desc_size + 1105 CE_DESC_RING_ALIGN); 1106 1107 if 
(ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) { 1108 1109 ce_ring->base_addr_CE_space = 1110 (ce_ring->base_addr_CE_space_unaligned + 1111 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1); 1112 1113 ce_ring->base_addr_owner_space = (void *) 1114 (((size_t) ce_ring->base_addr_owner_space_unaligned + 1115 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1)); 1116 } else { 1117 ce_ring->base_addr_CE_space = 1118 ce_ring->base_addr_CE_space_unaligned; 1119 ce_ring->base_addr_owner_space = 1120 ce_ring->base_addr_owner_space_unaligned; 1121 } 1122 1123 return ce_ring; 1124 } 1125 1126 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type, 1127 uint32_t ce_id, struct CE_ring_state *ring, 1128 struct CE_attr *attr) 1129 { 1130 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1131 1132 return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, 1133 ring, attr); 1134 } 1135 1136 int hif_ce_bus_early_suspend(struct hif_softc *scn) 1137 { 1138 uint8_t ul_pipe, dl_pipe; 1139 int ce_id, status, ul_is_polled, dl_is_polled; 1140 struct CE_state *ce_state; 1141 1142 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, 1143 &ul_pipe, &dl_pipe, 1144 &ul_is_polled, &dl_is_polled); 1145 if (status) { 1146 HIF_ERROR("%s: pipe_mapping failure", __func__); 1147 return status; 1148 } 1149 1150 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1151 if (ce_id == ul_pipe) 1152 continue; 1153 if (ce_id == dl_pipe) 1154 continue; 1155 1156 ce_state = scn->ce_id_to_state[ce_id]; 1157 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1158 if (ce_state->state == CE_RUNNING) 1159 ce_state->state = CE_PAUSED; 1160 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1161 } 1162 1163 return status; 1164 } 1165 1166 int hif_ce_bus_late_resume(struct hif_softc *scn) 1167 { 1168 int ce_id; 1169 struct CE_state *ce_state; 1170 int write_index = 0; 1171 bool index_updated; 1172 1173 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1174 ce_state = 
scn->ce_id_to_state[ce_id]; 1175 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1176 if (ce_state->state == CE_PENDING) { 1177 write_index = ce_state->src_ring->write_index; 1178 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 1179 write_index); 1180 ce_state->state = CE_RUNNING; 1181 index_updated = true; 1182 } else { 1183 index_updated = false; 1184 } 1185 1186 if (ce_state->state == CE_PAUSED) 1187 ce_state->state = CE_RUNNING; 1188 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1189 1190 if (index_updated) 1191 hif_record_ce_desc_event(scn, ce_id, 1192 RESUME_WRITE_INDEX_UPDATE, 1193 NULL, NULL, write_index, 0); 1194 } 1195 1196 return 0; 1197 } 1198 1199 /** 1200 * ce_oom_recovery() - try to recover rx ce from oom condition 1201 * @context: CE_state of the CE with oom rx ring 1202 * 1203 * the executing work Will continue to be rescheduled until 1204 * at least 1 descriptor is successfully posted to the rx ring. 1205 * 1206 * return: none 1207 */ 1208 static void ce_oom_recovery(void *context) 1209 { 1210 struct CE_state *ce_state = context; 1211 struct hif_softc *scn = ce_state->scn; 1212 struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn); 1213 struct HIF_CE_pipe_info *pipe_info = 1214 &ce_softc->pipe_info[ce_state->id]; 1215 1216 hif_post_recv_buffers_for_pipe(pipe_info); 1217 } 1218 1219 #ifdef HIF_CE_DEBUG_DATA_BUF 1220 /** 1221 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by 1222 * the CE descriptors. 
 * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	/* NOTE(review): on a mid-loop allocation failure the buffers already
	 * allocated are left in place; presumably the caller is expected to
	 * invoke free_mem_ce_debug_hist_data() on failure -- confirm.
	 */
	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (event->data == NULL)
			return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data != NULL)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */
/* Statically allocated descriptor history: one record array per CE. */
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	/* Point at the static table; nothing to allocate, cannot fail. */
	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	ce_hist->hist_ev[ce_id] = NULL;
}

#elif defined(HIF_CE_DEBUG_DATA_BUF) /* WIN */

/* Dynamically allocated history; enable flag tracks allocation success. */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	/* Release per-event data buffers first, if they were enabled. */
	if (ce_hist->data_enable[CE_id] == 1) {
		ce_hist->data_enable[CE_id] = 0;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else /* Disabled */

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: None
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

/* Arm the poll-timer flag for a CE configured with CE_ATTR_ENABLE_POLL. */
void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

/* Disarm the poll-timer flag; ce_poll_timeout checks it before working. */
void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
1410 */ 1411 struct CE_handle *ce_init(struct hif_softc *scn, 1412 unsigned int CE_id, struct CE_attr *attr) 1413 { 1414 struct CE_state *CE_state; 1415 uint32_t ctrl_addr; 1416 unsigned int nentries; 1417 bool malloc_CE_state = false; 1418 bool malloc_src_ring = false; 1419 int status; 1420 1421 QDF_ASSERT(CE_id < scn->ce_count); 1422 ctrl_addr = CE_BASE_ADDRESS(CE_id); 1423 CE_state = scn->ce_id_to_state[CE_id]; 1424 1425 if (!CE_state) { 1426 CE_state = 1427 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); 1428 if (!CE_state) 1429 return NULL; 1430 1431 malloc_CE_state = true; 1432 qdf_spinlock_create(&CE_state->ce_index_lock); 1433 1434 CE_state->id = CE_id; 1435 CE_state->ctrl_addr = ctrl_addr; 1436 CE_state->state = CE_RUNNING; 1437 CE_state->attr_flags = attr->flags; 1438 } 1439 CE_state->scn = scn; 1440 CE_state->service = ce_engine_service_reg; 1441 1442 qdf_atomic_init(&CE_state->rx_pending); 1443 if (attr == NULL) { 1444 /* Already initialized; caller wants the handle */ 1445 return (struct CE_handle *)CE_state; 1446 } 1447 1448 if (CE_state->src_sz_max) 1449 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); 1450 else 1451 CE_state->src_sz_max = attr->src_sz_max; 1452 1453 ce_init_ce_desc_event_log(scn, CE_id, 1454 attr->src_nentries + attr->dest_nentries); 1455 1456 /* source ring setup */ 1457 nentries = attr->src_nentries; 1458 if (nentries) { 1459 struct CE_ring_state *src_ring; 1460 1461 nentries = roundup_pwr2(nentries); 1462 if (CE_state->src_ring) { 1463 QDF_ASSERT(CE_state->src_ring->nentries == nentries); 1464 } else { 1465 src_ring = CE_state->src_ring = 1466 ce_alloc_ring_state(CE_state, 1467 CE_RING_SRC, 1468 nentries); 1469 if (!src_ring) { 1470 /* cannot allocate src ring. If the 1471 * CE_state is allocated locally free 1472 * CE_State and return error. 
1473 */ 1474 HIF_ERROR("%s: src ring has no mem", __func__); 1475 if (malloc_CE_state) { 1476 /* allocated CE_state locally */ 1477 qdf_mem_free(CE_state); 1478 malloc_CE_state = false; 1479 } 1480 return NULL; 1481 } 1482 /* we can allocate src ring. Mark that the src ring is 1483 * allocated locally 1484 */ 1485 malloc_src_ring = true; 1486 1487 /* 1488 * Also allocate a shadow src ring in 1489 * regular mem to use for faster access. 1490 */ 1491 src_ring->shadow_base_unaligned = 1492 qdf_mem_malloc(nentries * 1493 sizeof(struct CE_src_desc) + 1494 CE_DESC_RING_ALIGN); 1495 if (!src_ring->shadow_base_unaligned) 1496 goto error_no_dma_mem; 1497 1498 src_ring->shadow_base = (struct CE_src_desc *) 1499 (((size_t) src_ring->shadow_base_unaligned + 1500 CE_DESC_RING_ALIGN - 1) & 1501 ~(CE_DESC_RING_ALIGN - 1)); 1502 1503 status = ce_ring_setup(scn, CE_RING_SRC, CE_id, 1504 src_ring, attr); 1505 if (status < 0) 1506 goto error_target_access; 1507 1508 ce_ring_test_initial_indexes(CE_id, src_ring, 1509 "src_ring"); 1510 } 1511 } 1512 1513 /* destination ring setup */ 1514 nentries = attr->dest_nentries; 1515 if (nentries) { 1516 struct CE_ring_state *dest_ring; 1517 1518 nentries = roundup_pwr2(nentries); 1519 if (CE_state->dest_ring) { 1520 QDF_ASSERT(CE_state->dest_ring->nentries == nentries); 1521 } else { 1522 dest_ring = CE_state->dest_ring = 1523 ce_alloc_ring_state(CE_state, 1524 CE_RING_DEST, 1525 nentries); 1526 if (!dest_ring) { 1527 /* cannot allocate dst ring. If the CE_state 1528 * or src ring is allocated locally free 1529 * CE_State and src ring and return error. 
1530 */ 1531 HIF_ERROR("%s: dest ring has no mem", 1532 __func__); 1533 goto error_no_dma_mem; 1534 } 1535 1536 status = ce_ring_setup(scn, CE_RING_DEST, CE_id, 1537 dest_ring, attr); 1538 if (status < 0) 1539 goto error_target_access; 1540 1541 ce_ring_test_initial_indexes(CE_id, dest_ring, 1542 "dest_ring"); 1543 1544 /* For srng based target, init status ring here */ 1545 if (ce_srng_based(CE_state->scn)) { 1546 CE_state->status_ring = 1547 ce_alloc_ring_state(CE_state, 1548 CE_RING_STATUS, 1549 nentries); 1550 if (CE_state->status_ring == NULL) { 1551 /*Allocation failed. Cleanup*/ 1552 qdf_mem_free(CE_state->dest_ring); 1553 if (malloc_src_ring) { 1554 qdf_mem_free 1555 (CE_state->src_ring); 1556 CE_state->src_ring = NULL; 1557 malloc_src_ring = false; 1558 } 1559 if (malloc_CE_state) { 1560 /* allocated CE_state locally */ 1561 scn->ce_id_to_state[CE_id] = 1562 NULL; 1563 qdf_mem_free(CE_state); 1564 malloc_CE_state = false; 1565 } 1566 1567 return NULL; 1568 } 1569 1570 status = ce_ring_setup(scn, CE_RING_STATUS, 1571 CE_id, CE_state->status_ring, 1572 attr); 1573 if (status < 0) 1574 goto error_target_access; 1575 1576 } 1577 1578 /* epping */ 1579 /* poll timer */ 1580 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 1581 qdf_timer_init(scn->qdf_dev, 1582 &CE_state->poll_timer, 1583 ce_poll_timeout, 1584 CE_state, 1585 QDF_TIMER_TYPE_WAKE_APPS); 1586 ce_enable_polling(CE_state); 1587 qdf_timer_mod(&CE_state->poll_timer, 1588 CE_POLL_TIMEOUT); 1589 } 1590 } 1591 } 1592 1593 if (!ce_srng_based(scn)) { 1594 /* Enable CE error interrupts */ 1595 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1596 goto error_target_access; 1597 CE_ERROR_INTR_ENABLE(scn, ctrl_addr); 1598 if (Q_TARGET_ACCESS_END(scn) < 0) 1599 goto error_target_access; 1600 } 1601 1602 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, 1603 ce_oom_recovery, CE_state); 1604 1605 /* update the htt_data attribute */ 1606 ce_mark_datapath(CE_state); 1607 scn->ce_id_to_state[CE_id] = CE_state; 1608 
1609 alloc_mem_ce_debug_history(scn, CE_id); 1610 1611 return (struct CE_handle *)CE_state; 1612 1613 error_target_access: 1614 error_no_dma_mem: 1615 ce_fini((struct CE_handle *)CE_state); 1616 return NULL; 1617 } 1618 1619 /** 1620 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs 1621 * @hif_ctx: HIF Context 1622 * 1623 * API to check if polling is enabled on all CEs. Returns true when polling 1624 * is enabled on all CEs. 1625 * 1626 * Return: bool 1627 */ 1628 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx) 1629 { 1630 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1631 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1632 struct CE_attr *attr; 1633 int id; 1634 1635 for (id = 0; id < scn->ce_count; id++) { 1636 attr = &hif_state->host_ce_config[id]; 1637 if (attr && (attr->dest_nentries) && 1638 !(attr->flags & CE_ATTR_ENABLE_POLL)) 1639 return false; 1640 } 1641 return true; 1642 } 1643 qdf_export_symbol(hif_is_polled_mode_enabled); 1644 1645 #ifdef WLAN_FEATURE_FASTPATH 1646 /** 1647 * hif_enable_fastpath() Update that we have enabled fastpath mode 1648 * @hif_ctx: HIF context 1649 * 1650 * For use in data path 1651 * 1652 * Retrun: void 1653 */ 1654 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx) 1655 { 1656 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1657 1658 if (ce_srng_based(scn)) { 1659 HIF_INFO("%s, srng rings do not support fastpath", __func__); 1660 return; 1661 } 1662 HIF_DBG("%s, Enabling fastpath mode", __func__); 1663 scn->fastpath_mode_on = true; 1664 } 1665 1666 /** 1667 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled 1668 * @hif_ctx: HIF Context 1669 * 1670 * For use in data path to skip HTC 1671 * 1672 * Return: bool 1673 */ 1674 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx) 1675 { 1676 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1677 1678 return scn->fastpath_mode_on; 1679 } 1680 1681 /** 1682 * hif_get_ce_handle - API 
to get CE handle for FastPath mode 1683 * @hif_ctx: HIF Context 1684 * @id: CopyEngine Id 1685 * 1686 * API to return CE handle for fastpath mode 1687 * 1688 * Return: void 1689 */ 1690 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id) 1691 { 1692 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1693 1694 return scn->ce_id_to_state[id]; 1695 } 1696 qdf_export_symbol(hif_get_ce_handle); 1697 1698 /** 1699 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup. 1700 * No processing is required inside this function. 1701 * @ce_hdl: Cope engine handle 1702 * Using an assert, this function makes sure that, 1703 * the TX CE has been processed completely. 1704 * 1705 * This is called while dismantling CE structures. No other thread 1706 * should be using these structures while dismantling is occurring 1707 * therfore no locking is needed. 1708 * 1709 * Return: none 1710 */ 1711 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) 1712 { 1713 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 1714 struct CE_ring_state *src_ring = ce_state->src_ring; 1715 struct hif_softc *sc = ce_state->scn; 1716 uint32_t sw_index, write_index; 1717 1718 if (hif_is_nss_wifi_enabled(sc)) 1719 return; 1720 1721 if (sc->fastpath_mode_on && ce_state->htt_tx_data) { 1722 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE", 1723 __func__, __LINE__); 1724 sw_index = src_ring->sw_index; 1725 write_index = src_ring->sw_index; 1726 1727 /* At this point Tx CE should be clean */ 1728 qdf_assert_always(sw_index == write_index); 1729 } 1730 } 1731 1732 /** 1733 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue. 1734 * @ce_hdl: Handle to CE 1735 * 1736 * These buffers are never allocated on the fly, but 1737 * are allocated only once during HIF start and freed 1738 * only once during HIF stop. 1739 * NOTE: 1740 * The assumption here is there is no in-flight DMA in progress 1741 * currently, so that buffers can be freed up safely. 
1742 * 1743 * Return: NONE 1744 */ 1745 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) 1746 { 1747 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 1748 struct CE_ring_state *dst_ring = ce_state->dest_ring; 1749 qdf_nbuf_t nbuf; 1750 int i; 1751 1752 if (ce_state->scn->fastpath_mode_on == false) 1753 return; 1754 1755 if (!ce_state->htt_rx_data) 1756 return; 1757 1758 /* 1759 * when fastpath_mode is on and for datapath CEs. Unlike other CE's, 1760 * this CE is completely full: does not leave one blank space, to 1761 * distinguish between empty queue & full queue. So free all the 1762 * entries. 1763 */ 1764 for (i = 0; i < dst_ring->nentries; i++) { 1765 nbuf = dst_ring->per_transfer_context[i]; 1766 1767 /* 1768 * The reasons for doing this check are: 1769 * 1) Protect against calling cleanup before allocating buffers 1770 * 2) In a corner case, FASTPATH_mode_on may be set, but we 1771 * could have a partially filled ring, because of a memory 1772 * allocation failure in the middle of allocating ring. 1773 * This check accounts for that case, checking 1774 * fastpath_mode_on flag or started flag would not have 1775 * covered that case. This is not in performance path, 1776 * so OK to do this. 1777 */ 1778 if (nbuf) { 1779 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf, 1780 QDF_DMA_FROM_DEVICE); 1781 qdf_nbuf_free(nbuf); 1782 } 1783 } 1784 } 1785 1786 /** 1787 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1 1788 * @scn: HIF handle 1789 * 1790 * Datapath Rx CEs are special case, where we reuse all the message buffers. 1791 * Hence we have to post all the entries in the pipe, even, in the beginning 1792 * unlike for other CE pipes where one less than dest_nentries are filled in 1793 * the beginning. 
1794 * 1795 * Return: None 1796 */ 1797 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 1798 { 1799 int pipe_num; 1800 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1801 1802 if (scn->fastpath_mode_on == false) 1803 return; 1804 1805 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 1806 struct HIF_CE_pipe_info *pipe_info = 1807 &hif_state->pipe_info[pipe_num]; 1808 struct CE_state *ce_state = 1809 scn->ce_id_to_state[pipe_info->pipe_num]; 1810 1811 if (ce_state->htt_rx_data) 1812 atomic_inc(&pipe_info->recv_bufs_needed); 1813 } 1814 } 1815 #else 1816 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 1817 { 1818 } 1819 1820 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) 1821 { 1822 return false; 1823 } 1824 #endif /* WLAN_FEATURE_FASTPATH */ 1825 1826 void ce_fini(struct CE_handle *copyeng) 1827 { 1828 struct CE_state *CE_state = (struct CE_state *)copyeng; 1829 unsigned int CE_id = CE_state->id; 1830 struct hif_softc *scn = CE_state->scn; 1831 uint32_t desc_size; 1832 1833 bool inited = CE_state->timer_inited; 1834 CE_state->state = CE_UNUSED; 1835 scn->ce_id_to_state[CE_id] = NULL; 1836 /* Set the flag to false first to stop processing in ce_poll_timeout */ 1837 ce_disable_polling(CE_state); 1838 1839 qdf_lro_deinit(CE_state->lro_data); 1840 1841 if (CE_state->src_ring) { 1842 /* Cleanup the datapath Tx ring */ 1843 ce_h2t_tx_ce_cleanup(copyeng); 1844 1845 desc_size = ce_get_desc_size(scn, CE_RING_SRC); 1846 if (CE_state->src_ring->shadow_base_unaligned) 1847 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned); 1848 if (CE_state->src_ring->base_addr_owner_space_unaligned) 1849 ce_free_desc_ring(scn, CE_state->id, 1850 CE_state->src_ring, 1851 desc_size); 1852 qdf_mem_free(CE_state->src_ring); 1853 } 1854 if (CE_state->dest_ring) { 1855 /* Cleanup the datapath Rx ring */ 1856 ce_t2h_msg_ce_cleanup(copyeng); 1857 1858 desc_size = ce_get_desc_size(scn, CE_RING_DEST); 1859 if 
(CE_state->dest_ring->base_addr_owner_space_unaligned) 1860 ce_free_desc_ring(scn, CE_state->id, 1861 CE_state->dest_ring, 1862 desc_size); 1863 qdf_mem_free(CE_state->dest_ring); 1864 1865 /* epping */ 1866 if (inited) { 1867 qdf_timer_free(&CE_state->poll_timer); 1868 } 1869 } 1870 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) { 1871 /* Cleanup the datapath Tx ring */ 1872 ce_h2t_tx_ce_cleanup(copyeng); 1873 1874 if (CE_state->status_ring->shadow_base_unaligned) 1875 qdf_mem_free( 1876 CE_state->status_ring->shadow_base_unaligned); 1877 1878 desc_size = ce_get_desc_size(scn, CE_RING_STATUS); 1879 if (CE_state->status_ring->base_addr_owner_space_unaligned) 1880 ce_free_desc_ring(scn, CE_state->id, 1881 CE_state->status_ring, 1882 desc_size); 1883 qdf_mem_free(CE_state->status_ring); 1884 } 1885 1886 free_mem_ce_debug_history(scn, CE_id); 1887 reset_ce_debug_history(scn); 1888 ce_deinit_ce_desc_event_log(scn, CE_id); 1889 1890 qdf_spinlock_destroy(&CE_state->ce_index_lock); 1891 qdf_mem_free(CE_state); 1892 } 1893 1894 void hif_detach_htc(struct hif_opaque_softc *hif_ctx) 1895 { 1896 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1897 1898 qdf_mem_zero(&hif_state->msg_callbacks_pending, 1899 sizeof(hif_state->msg_callbacks_pending)); 1900 qdf_mem_zero(&hif_state->msg_callbacks_current, 1901 sizeof(hif_state->msg_callbacks_current)); 1902 } 1903 1904 /* Send the first nbytes bytes of the buffer */ 1905 QDF_STATUS 1906 hif_send_head(struct hif_opaque_softc *hif_ctx, 1907 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, 1908 qdf_nbuf_t nbuf, unsigned int data_attr) 1909 { 1910 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1911 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1912 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 1913 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 1914 int bytes = nbytes, nfrags = 0; 1915 struct ce_sendlist sendlist; 1916 int status, i = 0; 1917 unsigned int mux_id 
= 0;

	/* Sanity: caller-supplied byte count must not exceed the nbuf. */
	if (nbytes > qdf_nbuf_len(nbuf)) {
		/* NOTE(review): %d paired with a uint32_t cast — %u would
		 * be the matching specifier; confirm before changing the
		 * log format.
		 */
		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
			  (uint32_t)qdf_nbuf_len(nbuf));
		QDF_ASSERT(0);
	}

	/* Fold the mux id into the transfer id field. */
	transfer_id =
		(mux_id & MUX_ID_MASK) |
		(transfer_id & TRANSACTION_ID_MASK);
	data_attr &= DESC_DATA_FLAG_MASK;
	/*
	 * The common case involves sending multiple fragments within a
	 * single download (the tx descriptor and the tx frame header).
	 * So, optimize for the case of multiple fragments by not even
	 * checking whether it's necessary to use a sendlist.
	 * The overhead of using a sendlist for a single buffer download
	 * is not a big deal, since it happens rarely (for WMI messages).
	 */
	ce_sendlist_init(&sendlist);
	do {
		qdf_dma_addr_t frag_paddr;
		int frag_bytes;

		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		if (i++ > 0)
			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;

		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
					     frag_bytes >
					     bytes ? bytes : frag_bytes,
					     qdf_nbuf_get_frag_is_wordstream
					     (nbuf,
					      nfrags) ?
					     0 :
					     CE_SEND_FLAG_SWAP_DISABLE,
					     data_attr);
		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("%s: error, frag_num %d larger than limit",
				  __func__, nfrags);
			return status;
		}
		bytes -= frag_bytes;
		nfrags++;
	} while (bytes > 0);

	/* Make sure we have resources to handle this request */
	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	if (pipe_info->num_sends_allowed < nfrags) {
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
		return QDF_STATUS_E_RESOURCES;
	}
	pipe_info->num_sends_allowed -= nfrags;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);

	/*
	 * NOTE(review): this NULL check runs after num_sends_allowed was
	 * decremented, so the allowance is never returned on this error
	 * path; also A_ERROR is not a QDF_STATUS enumerator — verify
	 * callers only compare against QDF_STATUS_SUCCESS.
	 */
	if (qdf_unlikely(ce_hdl == NULL)) {
		HIF_ERROR("%s: error CE handle is null", __func__);
		return A_ERROR;
	}

	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	QDF_ASSERT(status == QDF_STATUS_SUCCESS);

	return status;
}

/*
 * Poll for send completions on @pipe when the free-slot count has
 * dropped below half the source ring, or unconditionally when @force
 * is set. Reading the CE register is relatively expensive, hence the
 * 50% threshold.
 */
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
			     int force)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */
		resources = hif_get_free_queue_number(hif_ctx, pipe);
		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
				 1))
			return;
	}
#if ATH_11AC_TXCOMPACT
	ce_per_engine_servicereap(scn, pipe);
#else
	ce_per_engine_service(scn, pipe);
#endif
}

/*
 * Return the number of send slots currently available on @pipe
 * (num_sends_allowed, read under the completion freeq lock).
 */
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
	uint16_t rv;

	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
	rv = pipe_info->num_sends_allowed;
	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	return rv;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void
hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int sw_index, unsigned int hw_index,
		     unsigned int toeplitz_hash_result)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	unsigned int sw_idx = sw_index, hw_idx = hw_index;
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	/* Drain every completed send descriptor on this copy engine. */
	do {
		/*
		 * The upper layer callback will be triggered
		 * when last fragment is completed.
		 */
		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
			if (scn->target_status == TARGET_STATUS_RESET) {
				/* Target resetting: unmap and drop locally. */
				qdf_nbuf_unmap_single(scn->qdf_dev,
						      transfer_context,
						      QDF_DMA_TO_DEVICE);
				qdf_nbuf_free(transfer_context);
			} else
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
		}

		/* Return the send slot reserved in the send path. */
		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
		pipe_info->num_sends_allowed++;
		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
	} while (ce_completed_send_next(copyeng,
			&ce_context, &transfer_context,
			&CE_data, &nbytes, &transfer_id,
			&sw_idx, &hw_idx,
			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}

/**
 * hif_ce_do_recv(): send message from copy engine to upper layers
 * @msg_callbacks: structure containing callback and callback context
 * @netbuf: skb containing message
 * @nbytes: number of bytes in the message
 * @pipe_info: used for the pipe_number info
 *
 * Checks the packet length, configures the length in the netbuff,
 * and calls the upper layer callback. An over-sized message (larger
 * than the pipe's buffer) is logged and freed instead.
 *
 * return: None
 */
static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
				  qdf_nbuf_t netbuf, int nbytes,
				  struct HIF_CE_pipe_info *pipe_info) {
	if (nbytes <= pipe_info->buf_sz) {
		qdf_nbuf_set_pktlen(netbuf, nbytes);
		msg_callbacks->
			rxCompletionHandler(msg_callbacks->Context,
					    netbuf, pipe_info->pipe_num);
	} else {
		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
			  __func__, netbuf, nbytes);

		qdf_nbuf_free(netbuf);
	}
}

/* Called by lower (CE) layer when data is received from the Target.
 */
static void
hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
		     void *transfer_context, qdf_dma_addr_t CE_data,
		     unsigned int nbytes, unsigned int transfer_id,
		     unsigned int flags)
{
	struct HIF_CE_pipe_info *pipe_info =
		(struct HIF_CE_pipe_info *)ce_context;
	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
	struct CE_state *ce_state = (struct CE_state *) copyeng;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
#ifdef HIF_PCI
	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
#endif
	struct hif_msg_callbacks *msg_callbacks =
		&pipe_info->pipe_callbacks;

	/* Drain completed receive descriptors, replenishing as we go. */
	do {
#ifdef HIF_PCI
		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
#endif
		qdf_nbuf_unmap_single(scn->qdf_dev,
				      (qdf_nbuf_t) transfer_context,
				      QDF_DMA_FROM_DEVICE);

		/* One buffer consumed: account for it and try to repost. */
		atomic_inc(&pipe_info->recv_bufs_needed);
		hif_post_recv_buffers_for_pipe(pipe_info);
		if (scn->target_status == TARGET_STATUS_RESET)
			qdf_nbuf_free(transfer_context);
		else
			hif_ce_do_recv(msg_callbacks, transfer_context,
				       nbytes, pipe_info);

		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
		ce_state->receive_count++;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			break;
		}
	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
					&CE_data, &nbytes, &transfer_id,
					&flags) == QDF_STATUS_SUCCESS);

}

/* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */

/*
 * Stash the upper layer's message callbacks for later installation
 * (see hif_msg_callbacks_install()). @unused is ignored.
 */
void
hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
	      struct hif_msg_callbacks *callbacks)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	spin_lock_init(&pcie_access_log_lock);
#endif
	/* Save callbacks for later installation */
	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
		 sizeof(hif_state->msg_callbacks_pending));

}

/*
 * Register per-pipe send/recv completion callbacks with the copy
 * engines and initialise per-pipe send accounting.
 *
 * Return: 0 on success, -EINVAL if no CEs are configured, -EFAULT if
 * no completion handlers were registered.
 */
static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
{
	struct CE_handle *ce_diag = hif_state->ce_diag;
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct hif_msg_callbacks *hif_msg_callbacks =
		&hif_state->msg_callbacks_current;

	/* daemonize("hif_compl_thread"); */

	if (scn->ce_count == 0) {
		HIF_ERROR("%s: Invalid ce_count", __func__);
		return -EINVAL;
	}

	if (!hif_msg_callbacks ||
	    !hif_msg_callbacks->rxCompletionHandler ||
	    !hif_msg_callbacks->txCompletionHandler) {
		HIF_ERROR("%s: no completion handler registered", __func__);
		return -EFAULT;
	}

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr attr;
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl == ce_diag)
			continue;	/* Handle Diagnostic CE specially */
		attr = hif_state->host_ce_config[pipe_num];
		if (attr.src_nentries) {
			/* pipe used to send to target */
			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
				__func__, pipe_num, pipe_info);
			ce_send_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_send_done, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
			/* One ring entry is kept in reserve. */
			pipe_info->num_sends_allowed = attr.src_nentries - 1;
		}
		if (attr.dest_nentries) {
			/* pipe used to receive from target */
			ce_recv_cb_register(pipe_info->ce_hdl,
					    hif_pci_ce_recv_data, pipe_info,
					    attr.flags & CE_ATTR_DISABLE_INTR);
		}

		if (attr.src_nentries)
			qdf_spinlock_create(&pipe_info->completion_freeq_lock);

		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
			     sizeof(pipe_info->pipe_callbacks));
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	return 0;
}

/*
 * Install pending msg callbacks.
 *
 * TBDXXX: This hack is needed because upper layers install msg callbacks
 * for use with HTC before BMI is done; yet this HIF implementation
 * needs to continue to use BMI msg callbacks. Really, upper layers
 * should not register HTC callbacks until AFTER BMI phase.
 */
static void hif_msg_callbacks_install(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	qdf_mem_copy(&hif_state->msg_callbacks_current,
		     &hif_state->msg_callbacks_pending,
		     sizeof(hif_state->msg_callbacks_pending));
}

/*
 * Look up the upload/download pipe numbers for the HTC control
 * service; polling flags are queried but discarded.
 */
void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
			  uint8_t *DLPipe)
{
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
}

/**
 * hif_dump_pipe_debug_count() - Log error count
 * @scn: hif_softc pointer.
 *
 * Output the pipe error counts of each pipe to log file
 *
 * Return: N/A
 */
void hif_dump_pipe_debug_count(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	if (hif_state == NULL) {
		HIF_ERROR("%s hif_state is NULL", __func__);
		return;
	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		/* Only pipes that actually recorded errors are logged. */
		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
}

/*
 * Record a failed rx-buffer post: give the buffer requirement back to
 * recv_bufs_needed, bump the per-pipe error counter (under the
 * recv_bufs_needed_lock), log the event, and — if the ring has run
 * completely dry — schedule the OOM replenish work.
 */
static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
					  void *nbuf, uint32_t *error_cnt,
					  enum hif_ce_event_type failure_type,
					  const char *failure_type_string)
{
	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	int ce_id = CE_state->id;
	uint32_t error_cnt_tmp;

	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	error_cnt_tmp = ++(*error_cnt);
	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
		__func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
		failure_type_string);
	hif_record_ce_desc_event(scn, ce_id, failure_type,
				 NULL, nbuf, bufs_needed_tmp, 0);
	/* if we fail to allocate the last buffer for an rx pipe,
	 *
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);

}

/*
 * Allocate, DMA-map and enqueue rx buffers on @pipe_info until
 * recv_bufs_needed reaches zero. On any failure the shortfall is
 * re-recorded via hif_post_recv_buffers_failure() (which may schedule
 * the OOM work) and an error status is returned immediately.
 *
 * Return: QDF_STATUS_SUCCESS, or the first alloc/map/enqueue error.
 */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	qdf_size_t buf_sz;
	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
	QDF_STATUS status;
	uint32_t bufs_posted = 0;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return QDF_STATUS_SUCCESS;
	}

	ce_hdl = pipe_info->ce_hdl;

	/* Lock is held at the top of each iteration, dropped inside;
	 * all error paths return with the lock released.
	 */
	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
		qdf_dma_addr_t CE_data;	/* CE space buffer address */
		qdf_nbuf_t nbuf;

		atomic_dec(&pipe_info->recv_bufs_needed);
		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
		if (!nbuf) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_alloc_err_count,
					HIF_RX_NBUF_ALLOC_FAILURE,
					"HIF_RX_NBUF_ALLOC_FAILURE");
			return QDF_STATUS_E_NOMEM;
		}

		/*
		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz, );
		 * DMA_FROM_DEVICE);
		 */
		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
					     QDF_DMA_FROM_DEVICE);

		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_dma_err_count,
					HIF_RX_NBUF_MAP_FAILURE,
					"HIF_RX_NBUF_MAP_FAILURE");
			qdf_nbuf_free(nbuf);
			return status;
		}

		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);

		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
					       buf_sz, DMA_FROM_DEVICE);
		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			hif_post_recv_buffers_failure(pipe_info, nbuf,
					&pipe_info->nbuf_ce_enqueue_err_count,
					HIF_RX_NBUF_ENQUEUE_FAILURE,
					"HIF_RX_NBUF_ENQUEUE_FAILURE");

			/* Undo the mapping before freeing the buffer. */
			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
						QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(nbuf);
			return status;
		}

		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
		bufs_posted++;
	}
	/* Each successful post pays down the accumulated error counts. */
	pipe_info->nbuf_alloc_err_count =
		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
	pipe_info->nbuf_dma_err_count =
		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;

	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * Try to post all desired receive buffers for all pipes.
 * Returns 0 for non fastpath rx copy engine as
 * oom_allocation_work will be scheduled to recover any
 * failures, non-zero if unable to completely replenish
 * receive buffers for fastpath rx Copy engine.
 */
QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;
	struct CE_state *ce_state = NULL;
	QDF_STATUS qdf_status;

	A_TARGET_ACCESS_LIKELY(scn);
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		pipe_info = &hif_state->pipe_info[pipe_num];

		/* NSS-offloaded htt rx pipes manage their own buffers. */
		if (hif_is_nss_wifi_enabled(scn) &&
		    ce_state && (ce_state->htt_rx_data))
			continue;

		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
		/* Only a fastpath htt-rx pipe failure is fatal here. */
		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
			ce_state->htt_rx_data &&
			scn->fastpath_mode_on) {
			A_TARGET_ACCESS_UNLIKELY(scn);
			return qdf_status;
		}
	}

	A_TARGET_ACCESS_UNLIKELY(scn);

	return QDF_STATUS_SUCCESS;
}

/*
 * Bring the HIF layer up: install message callbacks, register CE
 * completion handlers, mark the state started (enables buffer
 * cleanup), and post the initial set of receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE if the
 * completion handlers could not be registered, else the buffer-post
 * status (cleanup of posted buffers is done in hif_ce_disable).
 */
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	hif_update_fastpath_recv_bufs_cnt(scn);

	hif_msg_callbacks_install(scn);

	if (hif_completion_thread_startup(hif_state))
		return QDF_STATUS_E_FAILURE;

	/* enable buffer cleanup */
	hif_state->started = true;

	/* Post buffers once to start things off. */
	qdf_status = hif_post_recv_buffers(scn);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		/* cleanup is done in hif_ce_disable */
		HIF_ERROR("%s:failed to post buffers", __func__);
		return qdf_status;
	}

	return qdf_status;
}

/*
 * Revoke every rx buffer still enqueued on @pipe_info's copy engine,
 * unmapping and freeing each one. No-op for unused or not-yet-started
 * pipes, or when the qdf device is already gone.
 */
static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;


	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started)
		return;

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL)
		return;
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		if (netbuf) {
			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(netbuf);
		}
	}
}

/*
 * Cancel every pending send on @pipe_info's copy engine and hand the
 * buffers back to the upper layer via the tx completion callback
 * (except htt/htc endpoint packets, which are freed elsewhere).
 */
static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);

	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf
 != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_htt_tx_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			if (pipe_info->pipe_callbacks.txCompletionHandler)
				pipe_info->pipe_callbacks.
					txCompletionHandler(pipe_info->
						pipe_callbacks.Context,
						netbuf, id,
						toeplitz_hash_result);
		}
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *   buffers that were enqueued for receive
 *   buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	struct CE_state *ce_state;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		ce_state = scn->ce_id_to_state[pipe_num];
		/* NSS-offloaded htt pipes manage their own buffers. */
		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
		    ((ce_state->htt_tx_data) ||
		     (ce_state->htt_rx_data))) {
			continue;
		}

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}

/* Flush all pipe buffers, e.g. on surprise device removal. */
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

/* Tear down the per-CE OOM replenish work items. */
static void hif_destroy_oom_work(struct hif_softc *scn)
{
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (ce_state)
			qdf_destroy_work(scn->qdf_dev,
					 &ce_state->oom_allocation_work);
	}
}

/*
 * Full CE-layer shutdown: quiesce irq/BH contexts, drain buffers,
 * destroy per-pipe locks and CE instances, and stop the sleep timer.
 */
void hif_ce_stop(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int pipe_num;

	/*
	 * before cleaning up any memory, ensure irq &
	 * bottom half contexts will not be re-entered
	 */
	hif_disable_isr(&scn->osc);
	hif_destroy_oom_work(scn);
	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped,
	 * The Target should not DMA nor interrupt, Host code may
	 * not initiate anything more. So we just need to clean
	 * up Host-side state.
	 */

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;
		struct CE_attr attr;
		struct CE_handle *ce_diag = hif_state->ce_diag;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			/* The diag CE never created a freeq lock. */
			if (pipe_info->ce_hdl != ce_diag) {
				attr = hif_state->host_ce_config[pipe_num];
				if (attr.src_nentries)
					qdf_spinlock_destroy(&pipe_info->
							completion_freeq_lock);
			}
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}

/*
 * Return the static shadow register configuration table and its size;
 * either out-pointer may be NULL.
 */
static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
				   struct shadow_reg_cfg
				   **target_shadow_reg_cfg_ret,
				   uint32_t
				   *shadow_cfg_sz_ret)
{
	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}

/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * providing accessor to these values outside of this file.
 * currently these are stored in static pointers to const sections.
 * there are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct hif_softc *scn,
		struct CE_pipe_config **target_ce_config_ret,
		uint32_t *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		uint32_t *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		uint32_t *shadow_cfg_sz_ret)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	*target_ce_config_ret = hif_state->target_ce_config;
	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;

	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
				       target_service_to_ce_map_sz_ret);
	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
			       shadow_cfg_sz_ret);
}

#ifdef CONFIG_SHADOW_V2
/* Dump the v2 shadow register addresses that will be sent to firmware. */
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	int i;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);

	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		     "%s: i %d, val %x", __func__, i,
		     cfg->shadow_reg_v2_cfg[i].addr);
	}
}

#else
/* Stub when shadow register v2 support is compiled out. */
static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
}
#endif

#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * dont have interrupts enabled, we look at the DDR based SRRI
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		/* Fall back to the DDR copy if register access is blocked. */
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
		else
			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * dont have interrupts enabled, we look at the DDR based DRRI
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];

	if (attr.flags & CE_ATTR_DISABLE_INTR) {
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	} else {
		/* Fall back to the DDR copy if register access is blocked. */
		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
			return A_TARGET_READ(scn,
					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
		else
			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
					CE_ctrl_addr);
	}
}

/**
 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
 * @scn: hif_softc pointer
 *
 * Return: qdf status
 */
static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
{
	qdf_dma_addr_t paddr_rri_on_ddr = 0;

	/* One 32-bit read index slot per copy engine, DMA-coherent. */
	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	if (!scn->vaddr_rri_on_ddr) {
		hif_err("dmaable page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));

	return QDF_STATUS_SUCCESS;
}
#endif

#if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	uint32_t high_paddr, low_paddr;

	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);

	HIF_DBG("%s using srri and drri from DDR", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
}
#else
/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
}
#endif

/**
 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
 *                                    QMI command
 * @scn: hif context
 * @cfg: wlan enable config
 *
 * In case of Genoa, rri_over_ddr memory configuration is passed
 * to firmware through QMI configure command.
 */
#if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
		return;

	cfg->rri_over_ddr_cfg_valid = true;
	cfg->rri_over_ddr_cfg.base_addr_low =
		 BITS0_TO_31(scn->paddr_rri_on_ddr);
	cfg->rri_over_ddr_cfg.base_addr_high =
		 BITS32_TO_35(scn->paddr_rri_on_ddr);
}
#else
static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
					   struct pld_wlan_enable_cfg *cfg)
{
}
#endif

/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct pld_wlan_enable_cfg cfg;
	enum pld_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config(scn,
			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
			&cfg.num_ce_tgt_cfg,
			(struct service_to_pipe **)&cfg.ce_svc_cfg,
			&cfg.num_ce_svc_pipe_cfg,
			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
			&cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
					    &cfg.num_shadow_reg_v2_cfg);

	hif_print_hal_shadow_register_cfg(&cfg);

	hif_update_rri_over_ddr_config(scn, &cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = PLD_FTM;
	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
		mode = PLD_COLDBOOT_CALIBRATION;
	else if (QDF_IS_EPPING_ENABLED(con_mode))
		mode = PLD_EPPING;
	else
		mode = PLD_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
				       mode, QWLAN_VERSIONSTR);
}

#ifdef WLAN_FEATURE_EPPING

#define CE_EPPING_USES_IRQ true

/* Select the epping (engineering ping/loopback) CE configuration. */
void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
{
	if (CE_EPPING_USES_IRQ)
		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
	else
		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
	hif_state->target_ce_config = target_ce_config_wlan_epping;
	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
}
#endif

#ifdef
QCN7605_SUPPORT
/* Select the QCN7605 (Genoa) CE configuration tables and CE count. */
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
	hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qcn7605);
	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
	scn->ce_count = QCN7605_CE_COUNT;
}
#else
/* Stub when QCN7605 support is compiled out. */
static inline
void hif_set_ce_config_qcn7605(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	HIF_ERROR("QCN7605 not supported");
}
#endif

#ifdef CE_SVC_CMN_INIT
#ifdef QCA_WIFI_SUPPORT_SRNG
/* Register the SRNG-based CE service implementation. */
static inline void hif_ce_service_init(void)
{
	ce_service_srng_init();
}
#else
/* Register the legacy CE service implementation. */
static inline void hif_ce_service_init(void)
{
	ce_service_legacy_init();
}
#endif
#else
/* No common CE service init required for this build. */
static inline void hif_ce_service_init(void)
{
}
#endif


/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 * Selects host/target CE configuration (and ce_count) based on the
 * con mode and the detected target type.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_ce_service_init();
	hif_state->ce_services = ce_services_attach(scn);

	scn->ce_count = HOST_CE_COUNT;
	/* if epping is enabled we need to use the epping configuration. */
	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_ce_prepare_epping_config(hif_state);
		return;
	}

	switch (tgt_info->target_type) {
	default:
		hif_state->host_ce_config = host_ce_config_wlan;
		hif_state->target_ce_config = target_ce_config_wlan;
		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
		break;
	case TARGET_TYPE_QCN7605:
		hif_set_ce_config_qcn7605(scn, hif_state);
		break;
	case TARGET_TYPE_AR900B:
	case TARGET_TYPE_QCA9984:
	case TARGET_TYPE_IPQ4019:
	case TARGET_TYPE_QCA9888:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar900b;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar900b);

		break;

	case TARGET_TYPE_AR9888:
	case TARGET_TYPE_AR9888V2:
		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
			hif_state->host_ce_config =
				host_lowdesc_ce_cfg_wlan_ar9888;
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
		}

		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
		hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_ar9888);

		break;

	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
			hif_state->host_ce_config =
					host_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config =
				target_ce_config_wlan_qca8074_pci;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074_pci);
		} else {
			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
			hif_state->target_ce_config =
					target_ce_config_wlan_qca8074;
			hif_state->target_ce_config_sz =
				sizeof(target_ce_config_wlan_qca8074);
		}
		break;
	case TARGET_TYPE_QCA6290:
		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6290);

		scn->ce_count = QCA_6290_CE_COUNT;
		break;
	case TARGET_TYPE_QCA6390:
		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
		hif_state->target_ce_config_sz =
					sizeof(target_ce_config_wlan_qca6390);

		scn->ce_count = QCA_6390_CE_COUNT;
		break;
	}
	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
}

/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * return: 0 for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->irq_reg_lock);
	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
}

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * uses state variables to support cleaning up when hif_config_ce fails.
3152 */ 3153 void hif_unconfig_ce(struct hif_softc *hif_sc) 3154 { 3155 int pipe_num; 3156 struct HIF_CE_pipe_info *pipe_info; 3157 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3158 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 3159 3160 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3161 pipe_info = &hif_state->pipe_info[pipe_num]; 3162 if (pipe_info->ce_hdl) { 3163 ce_unregister_irq(hif_state, (1 << pipe_num)); 3164 } 3165 } 3166 deinit_tasklet_workers(hif_hdl); 3167 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3168 pipe_info = &hif_state->pipe_info[pipe_num]; 3169 if (pipe_info->ce_hdl) { 3170 ce_fini(pipe_info->ce_hdl); 3171 pipe_info->ce_hdl = NULL; 3172 pipe_info->buf_sz = 0; 3173 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3174 } 3175 } 3176 if (hif_sc->athdiag_procfs_inited) { 3177 athdiag_procfs_remove(); 3178 hif_sc->athdiag_procfs_inited = false; 3179 } 3180 } 3181 3182 #ifdef CONFIG_BYPASS_QMI 3183 #ifdef QCN7605_SUPPORT 3184 /** 3185 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3186 * @scn: pointer to HIF structure 3187 * 3188 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
3189 * 3190 * Return: void 3191 */ 3192 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3193 { 3194 void *target_va; 3195 phys_addr_t target_pa; 3196 struct ce_info *ce_info_ptr; 3197 uint32_t msi_data_start; 3198 uint32_t msi_data_count; 3199 uint32_t msi_irq_start; 3200 uint32_t i = 0; 3201 int ret; 3202 3203 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, 3204 scn->qdf_dev->dev, 3205 FW_SHARED_MEM + 3206 sizeof(struct ce_info), 3207 &target_pa); 3208 if (!target_va) 3209 return; 3210 3211 ce_info_ptr = (struct ce_info *)target_va; 3212 3213 if (scn->vaddr_rri_on_ddr) { 3214 ce_info_ptr->rri_over_ddr_low_paddr = 3215 BITS0_TO_31(scn->paddr_rri_on_ddr); 3216 ce_info_ptr->rri_over_ddr_high_paddr = 3217 BITS32_TO_35(scn->paddr_rri_on_ddr); 3218 } 3219 3220 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3221 &msi_data_count, &msi_data_start, 3222 &msi_irq_start); 3223 if (ret) { 3224 hif_err("Failed to get CE msi config"); 3225 return; 3226 } 3227 3228 for (i = 0; i < CE_COUNT_MAX; i++) { 3229 ce_info_ptr->cfg[i].ce_id = i; 3230 ce_info_ptr->cfg[i].msi_vector = 3231 (i % msi_data_count) + msi_irq_start; 3232 } 3233 3234 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3235 hif_info("target va %pK target pa %pa", target_va, &target_pa); 3236 } 3237 #else 3238 /** 3239 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3240 * @scn: pointer to HIF structure 3241 * 3242 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 
3243 * 3244 * Return: void 3245 */ 3246 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3247 { 3248 void *target_va; 3249 phys_addr_t target_pa; 3250 3251 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 3252 FW_SHARED_MEM, &target_pa); 3253 if (NULL == target_va) { 3254 HIF_TRACE("Memory allocation failed could not post target buf"); 3255 return; 3256 } 3257 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3258 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); 3259 } 3260 #endif 3261 3262 #else 3263 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 3264 { 3265 } 3266 #endif 3267 3268 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 3269 bool wait_for_it) 3270 { 3271 /* todo */ 3272 return 0; 3273 } 3274 3275 /** 3276 * hif_config_ce() - configure copy engines 3277 * @scn: hif context 3278 * 3279 * Prepares fw, copy engine hardware and host sw according 3280 * to the attributes selected by hif_ce_prepare_config. 3281 * 3282 * also calls athdiag_procfs_init 3283 * 3284 * return: 0 for success nonzero for failure. 3285 */ 3286 int hif_config_ce(struct hif_softc *scn) 3287 { 3288 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3289 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3290 struct HIF_CE_pipe_info *pipe_info; 3291 int pipe_num; 3292 struct CE_state *ce_state = NULL; 3293 3294 #ifdef ADRASTEA_SHADOW_REGISTERS 3295 int i; 3296 #endif 3297 QDF_STATUS rv = QDF_STATUS_SUCCESS; 3298 3299 scn->notice_send = true; 3300 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 3301 3302 hif_post_static_buf_to_target(scn); 3303 3304 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 3305 3306 hif_config_rri_on_ddr(scn); 3307 3308 if (ce_srng_based(scn)) 3309 scn->bus_ops.hif_target_sleep_state_adjust = 3310 &hif_srng_sleep_state_adjust; 3311 3312 /* Initialise the CE debug history sysfs interface inputs ce_id and 3313 * index. 
Disable data storing 3314 */ 3315 reset_ce_debug_history(scn); 3316 3317 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3318 struct CE_attr *attr; 3319 3320 pipe_info = &hif_state->pipe_info[pipe_num]; 3321 pipe_info->pipe_num = pipe_num; 3322 pipe_info->HIF_CE_state = hif_state; 3323 attr = &hif_state->host_ce_config[pipe_num]; 3324 3325 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 3326 ce_state = scn->ce_id_to_state[pipe_num]; 3327 if (!ce_state) { 3328 A_TARGET_ACCESS_UNLIKELY(scn); 3329 goto err; 3330 } 3331 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 3332 QDF_ASSERT(pipe_info->ce_hdl != NULL); 3333 if (pipe_info->ce_hdl == NULL) { 3334 rv = QDF_STATUS_E_FAILURE; 3335 A_TARGET_ACCESS_UNLIKELY(scn); 3336 goto err; 3337 } 3338 3339 ce_state->lro_data = qdf_lro_init(); 3340 3341 if (attr->flags & CE_ATTR_DIAG) { 3342 /* Reserve the ultimate CE for 3343 * Diagnostic Window support 3344 */ 3345 hif_state->ce_diag = pipe_info->ce_hdl; 3346 continue; 3347 } 3348 3349 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3350 (ce_state->htt_rx_data)) 3351 continue; 3352 3353 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); 3354 if (attr->dest_nentries > 0) { 3355 atomic_set(&pipe_info->recv_bufs_needed, 3356 init_buffer_count(attr->dest_nentries - 1)); 3357 /*SRNG based CE has one entry less */ 3358 if (ce_srng_based(scn)) 3359 atomic_dec(&pipe_info->recv_bufs_needed); 3360 } else { 3361 atomic_set(&pipe_info->recv_bufs_needed, 0); 3362 } 3363 ce_tasklet_init(hif_state, (1 << pipe_num)); 3364 ce_register_irq(hif_state, (1 << pipe_num)); 3365 } 3366 3367 if (athdiag_procfs_init(scn) != 0) { 3368 A_TARGET_ACCESS_UNLIKELY(scn); 3369 goto err; 3370 } 3371 scn->athdiag_procfs_inited = true; 3372 3373 HIF_DBG("%s: ce_init done", __func__); 3374 3375 init_tasklet_workers(hif_hdl); 3376 3377 HIF_DBG("%s: X, ret = %d", __func__, rv); 3378 3379 #ifdef ADRASTEA_SHADOW_REGISTERS 3380 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", 
__func__); 3381 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 3382 HIF_DBG("%s Shadow Register%d is mapped to address %x", 3383 __func__, i, 3384 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 3385 } 3386 #endif 3387 3388 return rv != QDF_STATUS_SUCCESS; 3389 3390 err: 3391 /* Failure, so clean up */ 3392 hif_unconfig_ce(scn); 3393 HIF_TRACE("%s: X, ret = %d", __func__, rv); 3394 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 3395 } 3396 3397 #ifdef IPA_OFFLOAD 3398 /** 3399 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 3400 * @scn: bus context 3401 * @ce_sr_base_paddr: copyengine source ring base physical address 3402 * @ce_sr_ring_size: copyengine source ring size 3403 * @ce_reg_paddr: copyengine register physical address 3404 * 3405 * IPA micro controller data path offload feature enabled, 3406 * HIF should release copy engine related resource information to IPA UC 3407 * IPA UC will access hardware resource with released information 3408 * 3409 * Return: None 3410 */ 3411 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 3412 qdf_shared_mem_t **ce_sr, 3413 uint32_t *ce_sr_ring_size, 3414 qdf_dma_addr_t *ce_reg_paddr) 3415 { 3416 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3417 struct HIF_CE_pipe_info *pipe_info = 3418 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 3419 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3420 3421 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 3422 ce_reg_paddr); 3423 } 3424 #endif /* IPA_OFFLOAD */ 3425 3426 3427 #ifdef ADRASTEA_SHADOW_REGISTERS 3428 3429 /* 3430 * Current shadow register config 3431 * 3432 * ----------------------------------------------------------- 3433 * Shadow Register | CE | src/dst write index 3434 * ----------------------------------------------------------- 3435 * 0 | 0 | src 3436 * 1 No Config - Doesn't point to anything 3437 * 2 No Config - Doesn't point to anything 3438 * 3 | 3 | src 3439 * 4 | 4 | src 3440 * 5 | 5 | src 3441 * 6 No Config - Doesn't point to 
anything 3442 * 7 | 7 | src 3443 * 8 No Config - Doesn't point to anything 3444 * 9 No Config - Doesn't point to anything 3445 * 10 No Config - Doesn't point to anything 3446 * 11 No Config - Doesn't point to anything 3447 * ----------------------------------------------------------- 3448 * 12 No Config - Doesn't point to anything 3449 * 13 | 1 | dst 3450 * 14 | 2 | dst 3451 * 15 No Config - Doesn't point to anything 3452 * 16 No Config - Doesn't point to anything 3453 * 17 No Config - Doesn't point to anything 3454 * 18 No Config - Doesn't point to anything 3455 * 19 | 7 | dst 3456 * 20 | 8 | dst 3457 * 21 No Config - Doesn't point to anything 3458 * 22 No Config - Doesn't point to anything 3459 * 23 No Config - Doesn't point to anything 3460 * ----------------------------------------------------------- 3461 * 3462 * 3463 * ToDo - Move shadow register config to following in the future 3464 * This helps free up a block of shadow registers towards the end. 3465 * Can be used for other purposes 3466 * 3467 * ----------------------------------------------------------- 3468 * Shadow Register | CE | src/dst write index 3469 * ----------------------------------------------------------- 3470 * 0 | 0 | src 3471 * 1 | 3 | src 3472 * 2 | 4 | src 3473 * 3 | 5 | src 3474 * 4 | 7 | src 3475 * ----------------------------------------------------------- 3476 * 5 | 1 | dst 3477 * 6 | 2 | dst 3478 * 7 | 7 | dst 3479 * 8 | 8 | dst 3480 * ----------------------------------------------------------- 3481 * 9 No Config - Doesn't point to anything 3482 * 12 No Config - Doesn't point to anything 3483 * 13 No Config - Doesn't point to anything 3484 * 14 No Config - Doesn't point to anything 3485 * 15 No Config - Doesn't point to anything 3486 * 16 No Config - Doesn't point to anything 3487 * 17 No Config - Doesn't point to anything 3488 * 18 No Config - Doesn't point to anything 3489 * 19 No Config - Doesn't point to anything 3490 * 20 No Config - Doesn't point to anything 3491 * 21 No 
Config - Doesn't point to anything 3492 * 22 No Config - Doesn't point to anything 3493 * 23 No Config - Doesn't point to anything 3494 * ----------------------------------------------------------- 3495 */ 3496 #ifndef QCN7605_SUPPORT 3497 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3498 { 3499 u32 addr = 0; 3500 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3501 3502 switch (ce) { 3503 case 0: 3504 addr = SHADOW_VALUE0; 3505 break; 3506 case 3: 3507 addr = SHADOW_VALUE3; 3508 break; 3509 case 4: 3510 addr = SHADOW_VALUE4; 3511 break; 3512 case 5: 3513 addr = SHADOW_VALUE5; 3514 break; 3515 case 7: 3516 addr = SHADOW_VALUE7; 3517 break; 3518 default: 3519 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3520 QDF_ASSERT(0); 3521 } 3522 return addr; 3523 3524 } 3525 3526 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3527 { 3528 u32 addr = 0; 3529 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3530 3531 switch (ce) { 3532 case 1: 3533 addr = SHADOW_VALUE13; 3534 break; 3535 case 2: 3536 addr = SHADOW_VALUE14; 3537 break; 3538 case 5: 3539 addr = SHADOW_VALUE17; 3540 break; 3541 case 7: 3542 addr = SHADOW_VALUE19; 3543 break; 3544 case 8: 3545 addr = SHADOW_VALUE20; 3546 break; 3547 case 9: 3548 addr = SHADOW_VALUE21; 3549 break; 3550 case 10: 3551 addr = SHADOW_VALUE22; 3552 break; 3553 case 11: 3554 addr = SHADOW_VALUE23; 3555 break; 3556 default: 3557 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3558 QDF_ASSERT(0); 3559 } 3560 3561 return addr; 3562 3563 } 3564 #else 3565 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3566 { 3567 u32 addr = 0; 3568 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3569 3570 switch (ce) { 3571 case 0: 3572 addr = SHADOW_VALUE0; 3573 break; 3574 case 4: 3575 addr = SHADOW_VALUE4; 3576 break; 3577 case 5: 3578 addr = SHADOW_VALUE5; 3579 break; 3580 default: 3581 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3582 QDF_ASSERT(0); 3583 } 3584 return addr; 3585 } 3586 3587 u32 shadow_dst_wr_ind_addr(struct hif_softc 
*scn, u32 ctrl_addr) 3588 { 3589 u32 addr = 0; 3590 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3591 3592 switch (ce) { 3593 case 1: 3594 addr = SHADOW_VALUE13; 3595 break; 3596 case 2: 3597 addr = SHADOW_VALUE14; 3598 break; 3599 case 3: 3600 addr = SHADOW_VALUE15; 3601 break; 3602 case 5: 3603 addr = SHADOW_VALUE17; 3604 break; 3605 case 7: 3606 addr = SHADOW_VALUE19; 3607 break; 3608 case 8: 3609 addr = SHADOW_VALUE20; 3610 break; 3611 case 9: 3612 addr = SHADOW_VALUE21; 3613 break; 3614 case 10: 3615 addr = SHADOW_VALUE22; 3616 break; 3617 case 11: 3618 addr = SHADOW_VALUE23; 3619 break; 3620 default: 3621 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3622 QDF_ASSERT(0); 3623 } 3624 3625 return addr; 3626 } 3627 #endif 3628 #endif 3629 3630 #if defined(FEATURE_LRO) 3631 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 3632 { 3633 struct CE_state *ce_state; 3634 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3635 3636 ce_state = scn->ce_id_to_state[ctx_id]; 3637 3638 return ce_state->lro_data; 3639 } 3640 #endif 3641 3642 /** 3643 * hif_map_service_to_pipe() - returns the ce ids pertaining to 3644 * this service 3645 * @scn: hif_softc pointer. 3646 * @svc_id: Service ID for which the mapping is needed. 3647 * @ul_pipe: address of the container in which ul pipe is returned. 3648 * @dl_pipe: address of the container in which dl pipe is returned. 3649 * @ul_is_polled: address of the container in which a bool 3650 * indicating if the UL CE for this service 3651 * is polled is returned. 3652 * @dl_is_polled: address of the container in which a bool 3653 * indicating if the DL CE for this service 3654 * is polled is returned. 3655 * 3656 * Return: Indicates whether the service has been found in the table. 3657 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 3658 * There will be warning logs if either leg has not been updated 3659 * because it missed the entry in the table (but this is not an err). 
3660 */ 3661 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 3662 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 3663 int *dl_is_polled) 3664 { 3665 int status = QDF_STATUS_E_INVAL; 3666 unsigned int i; 3667 struct service_to_pipe element; 3668 struct service_to_pipe *tgt_svc_map_to_use; 3669 uint32_t sz_tgt_svc_map_to_use; 3670 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3671 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3672 bool dl_updated = false; 3673 bool ul_updated = false; 3674 3675 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 3676 &sz_tgt_svc_map_to_use); 3677 3678 *dl_is_polled = 0; /* polling for received messages not supported */ 3679 3680 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 3681 3682 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 3683 if (element.service_id == svc_id) { 3684 if (element.pipedir == PIPEDIR_OUT) { 3685 *ul_pipe = element.pipenum; 3686 *ul_is_polled = 3687 (hif_state->host_ce_config[*ul_pipe].flags & 3688 CE_ATTR_DISABLE_INTR) != 0; 3689 ul_updated = true; 3690 } else if (element.pipedir == PIPEDIR_IN) { 3691 *dl_pipe = element.pipenum; 3692 dl_updated = true; 3693 } 3694 status = QDF_STATUS_SUCCESS; 3695 } 3696 } 3697 if (ul_updated == false) 3698 HIF_DBG("ul pipe is NOT updated for service %d", svc_id); 3699 if (dl_updated == false) 3700 HIF_DBG("dl pipe is NOT updated for service %d", svc_id); 3701 3702 return status; 3703 } 3704 3705 #ifdef SHADOW_REG_DEBUG 3706 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 3707 uint32_t CE_ctrl_addr) 3708 { 3709 uint32_t read_from_hw, srri_from_ddr = 0; 3710 3711 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 3712 3713 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3714 3715 if (read_from_hw != srri_from_ddr) { 3716 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3717 
__func__, srri_from_ddr, read_from_hw, 3718 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3719 QDF_ASSERT(0); 3720 } 3721 return srri_from_ddr; 3722 } 3723 3724 3725 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 3726 uint32_t CE_ctrl_addr) 3727 { 3728 uint32_t read_from_hw, drri_from_ddr = 0; 3729 3730 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 3731 3732 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3733 3734 if (read_from_hw != drri_from_ddr) { 3735 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3736 drri_from_ddr, read_from_hw, 3737 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3738 QDF_ASSERT(0); 3739 } 3740 return drri_from_ddr; 3741 } 3742 3743 #endif 3744 3745 /** 3746 * hif_dump_ce_registers() - dump ce registers 3747 * @scn: hif_opaque_softc pointer. 3748 * 3749 * Output the copy engine registers 3750 * 3751 * Return: 0 for success or error code 3752 */ 3753 int hif_dump_ce_registers(struct hif_softc *scn) 3754 { 3755 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3756 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 3757 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 3758 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 3759 uint16_t i; 3760 QDF_STATUS status; 3761 3762 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 3763 if (scn->ce_id_to_state[i] == NULL) { 3764 HIF_DBG("CE%d not used.", i); 3765 continue; 3766 } 3767 3768 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 3769 (uint8_t *) &ce_reg_values[0], 3770 ce_reg_word_size * sizeof(uint32_t)); 3771 3772 if (status != QDF_STATUS_SUCCESS) { 3773 HIF_ERROR("Dumping CE register failed!"); 3774 return -EACCES; 3775 } 3776 HIF_ERROR("CE%d=>\n", i); 3777 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 3778 (uint8_t *) &ce_reg_values[0], 3779 ce_reg_word_size * sizeof(uint32_t)); 3780 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 
3781 + SR_WR_INDEX_ADDRESS), 3782 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 3783 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 3784 + CURRENT_SRRI_ADDRESS), 3785 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 3786 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 3787 + DST_WR_INDEX_ADDRESS), 3788 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 3789 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 3790 + CURRENT_DRRI_ADDRESS), 3791 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 3792 qdf_print("---"); 3793 } 3794 return 0; 3795 } 3796 qdf_export_symbol(hif_dump_ce_registers); 3797 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 3798 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 3799 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 3800 { 3801 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3802 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3803 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 3804 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3805 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3806 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 3807 struct CE_ring_state *src_ring = ce_state->src_ring; 3808 struct CE_ring_state *dest_ring = ce_state->dest_ring; 3809 3810 if (src_ring) { 3811 hif_info->ul_pipe.nentries = src_ring->nentries; 3812 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 3813 hif_info->ul_pipe.sw_index = src_ring->sw_index; 3814 hif_info->ul_pipe.write_index = src_ring->write_index; 3815 hif_info->ul_pipe.hw_index = src_ring->hw_index; 3816 hif_info->ul_pipe.base_addr_CE_space = 3817 src_ring->base_addr_CE_space; 3818 hif_info->ul_pipe.base_addr_owner_space = 3819 src_ring->base_addr_owner_space; 3820 } 3821 3822 3823 if (dest_ring) { 3824 hif_info->dl_pipe.nentries = dest_ring->nentries; 3825 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; 3826 hif_info->dl_pipe.sw_index = dest_ring->sw_index; 3827 hif_info->dl_pipe.write_index = 
dest_ring->write_index; 3828 hif_info->dl_pipe.hw_index = dest_ring->hw_index; 3829 hif_info->dl_pipe.base_addr_CE_space = 3830 dest_ring->base_addr_CE_space; 3831 hif_info->dl_pipe.base_addr_owner_space = 3832 dest_ring->base_addr_owner_space; 3833 } 3834 3835 hif_info->pci_mem = pci_resource_start(sc->pdev, 0); 3836 hif_info->ctrl_addr = ce_state->ctrl_addr; 3837 3838 return hif_info; 3839 } 3840 qdf_export_symbol(hif_get_addl_pipe_info); 3841 3842 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) 3843 { 3844 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3845 3846 scn->nss_wifi_ol_mode = mode; 3847 return 0; 3848 } 3849 qdf_export_symbol(hif_set_nss_wifiol_mode); 3850 #endif 3851 3852 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) 3853 { 3854 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3855 scn->hif_attribute = hif_attrib; 3856 } 3857 3858 3859 /* disable interrupts (only applicable for legacy copy engine currently */ 3860 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) 3861 { 3862 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3863 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; 3864 uint32_t ctrl_addr = CE_state->ctrl_addr; 3865 3866 Q_TARGET_ACCESS_BEGIN(scn); 3867 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 3868 Q_TARGET_ACCESS_END(scn); 3869 } 3870 qdf_export_symbol(hif_disable_interrupt); 3871 3872 /** 3873 * hif_fw_event_handler() - hif fw event handler 3874 * @hif_state: pointer to hif ce state structure 3875 * 3876 * Process fw events and raise HTC callback to process fw events. 
3877 * 3878 * Return: none 3879 */ 3880 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) 3881 { 3882 struct hif_msg_callbacks *msg_callbacks = 3883 &hif_state->msg_callbacks_current; 3884 3885 if (!msg_callbacks->fwEventHandler) 3886 return; 3887 3888 msg_callbacks->fwEventHandler(msg_callbacks->Context, 3889 QDF_STATUS_E_FAILURE); 3890 } 3891 3892 #ifndef QCA_WIFI_3_0 3893 /** 3894 * hif_fw_interrupt_handler() - FW interrupt handler 3895 * @irq: irq number 3896 * @arg: the user pointer 3897 * 3898 * Called from the PCI interrupt handler when a 3899 * firmware-generated interrupt to the Host. 3900 * 3901 * only registered for legacy ce devices 3902 * 3903 * Return: status of handled irq 3904 */ 3905 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3906 { 3907 struct hif_softc *scn = arg; 3908 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3909 uint32_t fw_indicator_address, fw_indicator; 3910 3911 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 3912 return ATH_ISR_NOSCHED; 3913 3914 fw_indicator_address = hif_state->fw_indicator_address; 3915 /* For sudden unplug this will return ~0 */ 3916 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 3917 3918 if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { 3919 /* ACK: clear Target-side pending event */ 3920 A_TARGET_WRITE(scn, fw_indicator_address, 3921 fw_indicator & ~FW_IND_EVENT_PENDING); 3922 if (Q_TARGET_ACCESS_END(scn) < 0) 3923 return ATH_ISR_SCHED; 3924 3925 if (hif_state->started) { 3926 hif_fw_event_handler(hif_state); 3927 } else { 3928 /* 3929 * Probable Target failure before we're prepared 3930 * to handle it. Generally unexpected. 
3931 * fw_indicator used as bitmap, and defined as below: 3932 * FW_IND_EVENT_PENDING 0x1 3933 * FW_IND_INITIALIZED 0x2 3934 * FW_IND_NEEDRECOVER 0x4 3935 */ 3936 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 3937 ("%s: Early firmware event indicated 0x%x\n", 3938 __func__, fw_indicator)); 3939 } 3940 } else { 3941 if (Q_TARGET_ACCESS_END(scn) < 0) 3942 return ATH_ISR_SCHED; 3943 } 3944 3945 return ATH_ISR_SCHED; 3946 } 3947 #else 3948 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 3949 { 3950 return ATH_ISR_SCHED; 3951 } 3952 #endif /* #ifdef QCA_WIFI_3_0 */ 3953 3954 3955 /** 3956 * hif_wlan_disable(): call the platform driver to disable wlan 3957 * @scn: HIF Context 3958 * 3959 * This function passes the con_mode to platform driver to disable 3960 * wlan. 3961 * 3962 * Return: void 3963 */ 3964 void hif_wlan_disable(struct hif_softc *scn) 3965 { 3966 enum pld_driver_mode mode; 3967 uint32_t con_mode = hif_get_conparam(scn); 3968 3969 if (scn->target_status == TARGET_STATUS_RESET) 3970 return; 3971 3972 if (QDF_GLOBAL_FTM_MODE == con_mode) 3973 mode = PLD_FTM; 3974 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3975 mode = PLD_EPPING; 3976 else 3977 mode = PLD_MISSION; 3978 3979 pld_wlan_disable(scn->qdf_dev->dev, mode); 3980 } 3981 3982 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) 3983 { 3984 QDF_STATUS status; 3985 uint8_t ul_pipe, dl_pipe; 3986 int ul_is_polled, dl_is_polled; 3987 3988 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ 3989 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 3990 HTC_CTRL_RSVD_SVC, 3991 &ul_pipe, &dl_pipe, 3992 &ul_is_polled, &dl_is_polled); 3993 if (status) { 3994 HIF_ERROR("%s: failed to map pipe: %d", __func__, status); 3995 return qdf_status_to_os_return(status); 3996 } 3997 3998 *ce_id = dl_pipe; 3999 4000 return 0; 4001 } 4002