1 /* 2 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "targcfg.h" 20 #include "qdf_lock.h" 21 #include "qdf_status.h" 22 #include "qdf_status.h" 23 #include <qdf_atomic.h> /* qdf_atomic_read */ 24 #include <targaddrs.h> 25 #include "hif_io32.h" 26 #include <hif.h> 27 #include <target_type.h> 28 #include "regtable.h" 29 #define ATH_MODULE_NAME hif 30 #include <a_debug.h> 31 #include "hif_main.h" 32 #include "ce_api.h" 33 #include "qdf_trace.h" 34 #include "pld_common.h" 35 #include "hif_debug.h" 36 #include "ce_internal.h" 37 #include "ce_reg.h" 38 #include "ce_assignment.h" 39 #include "ce_tasklet.h" 40 #include "qdf_module.h" 41 42 #define CE_POLL_TIMEOUT 10 /* ms */ 43 44 #define AGC_DUMP 1 45 #define CHANINFO_DUMP 2 46 #define BB_WATCHDOG_DUMP 3 47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 48 #define PCIE_ACCESS_DUMP 4 49 #endif 50 #include "mp_dev.h" 51 52 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \ 53 defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG) 54 #define QCA_WIFI_SUPPORT_SRNG 55 #endif 56 57 /* Forward references */ 58 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info); 59 60 /* 61 * Fix EV118783, poll to check whether a BMI response comes 62 * other than waiting for the interruption which may be lost. 63 */ 64 /* #define BMI_RSP_POLLING */ 65 #define BMI_RSP_TO_MILLISEC 1000 66 67 #ifdef CONFIG_BYPASS_QMI 68 #define BYPASS_QMI 1 69 #else 70 #define BYPASS_QMI 0 71 #endif 72 73 #ifdef ENABLE_10_4_FW_HDR 74 #if (ENABLE_10_4_FW_HDR == 1) 75 #define WDI_IPA_SERVICE_GROUP 5 76 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0) 77 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1) 78 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2) 79 #endif /* ENABLE_10_4_FW_HDR == 1 */ 80 #endif /* ENABLE_10_4_FW_HDR */ 81 82 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn); 83 static void hif_config_rri_on_ddr(struct hif_softc *scn); 84 85 /** 86 * hif_target_access_log_dump() - dump access log 87 * 88 * dump access log 89 * 90 * Return: n/a 91 */ 92 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 93 static void hif_target_access_log_dump(void) 94 { 95 hif_target_dump_access_log(); 96 } 97 #endif 98 99 100 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx, 101 uint8_t cmd_id, bool start) 102 { 103 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 104 105 switch (cmd_id) { 106 case AGC_DUMP: 107 if (start) 108 priv_start_agc(scn); 109 else 110 priv_dump_agc(scn); 111 break; 112 case CHANINFO_DUMP: 113 if (start) 114 priv_start_cap_chaninfo(scn); 115 else 116 priv_dump_chaninfo(scn); 117 break; 118 case BB_WATCHDOG_DUMP: 119 priv_dump_bbwatchdog(scn); 120 break; 121 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 122 case PCIE_ACCESS_DUMP: 123 hif_target_access_log_dump(); 124 break; 125 #endif 126 default: 127 HIF_ERROR("%s: Invalid htc dump command", __func__); 128 break; 129 } 130 } 131 132 static void ce_poll_timeout(void *arg) 133 { 134 struct CE_state *CE_state = (struct CE_state *)arg; 135 136 if (CE_state->timer_inited) { 137 ce_per_engine_service(CE_state->scn, CE_state->id); 138 qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT); 139 } 140 } 141 142 static unsigned int roundup_pwr2(unsigned int n) 143 { 144 int i; 145 unsigned int test_pwr2; 146 147 if (!(n & (n - 1))) 148 return n; /* already a power of 2 */ 149 150 test_pwr2 = 4; 151 for (i = 0; i < 29; i++) { 152 if (test_pwr2 > n) 153 return test_pwr2; 154 test_pwr2 = test_pwr2 << 1; 155 } 156 157 QDF_ASSERT(0); /* n too large */ 158 return 0; 159 } 160 161 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C 162 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40 163 164 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = { 165 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 166 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, 167 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 168 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, 169 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, 170 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 171 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 172 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 173 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 174 #ifdef QCA_WIFI_3_0_ADRASTEA 175 { 9, ADRASTEA_DST_WR_INDEX_OFFSET}, 176 { 10, ADRASTEA_DST_WR_INDEX_OFFSET}, 177 { 11, ADRASTEA_DST_WR_INDEX_OFFSET}, 178 #endif 179 }; 180 181 #ifdef QCN7605_SUPPORT 182 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = { 183 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 184 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 185 { 5, ADRASTEA_SRC_WR_INDEX_OFFSET}, 186 { 3, ADRASTEA_DST_WR_INDEX_OFFSET}, 187 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 188 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 189 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 190 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 191 }; 192 #endif 193 194 #ifdef WLAN_FEATURE_EPPING 195 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = { 196 { 0, ADRASTEA_SRC_WR_INDEX_OFFSET}, 197 { 3, ADRASTEA_SRC_WR_INDEX_OFFSET}, 198 { 4, ADRASTEA_SRC_WR_INDEX_OFFSET}, 199 { 7, ADRASTEA_SRC_WR_INDEX_OFFSET}, 200 { 1, ADRASTEA_DST_WR_INDEX_OFFSET}, 201 { 2, ADRASTEA_DST_WR_INDEX_OFFSET}, 202 { 5, ADRASTEA_DST_WR_INDEX_OFFSET}, 203 { 7, ADRASTEA_DST_WR_INDEX_OFFSET}, 204 { 8, ADRASTEA_DST_WR_INDEX_OFFSET}, 205 }; 206 #endif 207 208 /* CE_PCI TABLE */ 209 /* 210 * NOTE: the table below is out of date, though still a useful reference. 211 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual 212 * mapping of HTC services to HIF pipes. 213 */ 214 /* 215 * This authoritative table defines Copy Engine configuration and the mapping 216 * of services/endpoints to CEs. A subset of this information is passed to 217 * the Target during startup as a prerequisite to entering BMI phase. 218 * See: 219 * target_service_to_ce_map - Target-side mapping 220 * hif_map_service_to_pipe - Host-side mapping 221 * target_ce_config - Target-side configuration 222 * host_ce_config - Host-side configuration 223 ============================================================================ 224 Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer 225 | | | ctio | Size | Frequency 226 | | | n | | 227 ============================================================================ 228 tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent 229 descriptor | | | | O(100B) | and regular 230 download | | | | | 231 ---------------------------------------------------------------------------- 232 rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and 233 indication | | | | O(10B) | regular 234 upload | | | | | 235 ---------------------------------------------------------------------------- 236 MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare 237 upload | | | | O(1000B) | (frequent 238 e.g. noise | | | | | during IP1.0 239 packets | | | | | testing) 240 ---------------------------------------------------------------------------- 241 MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare 242 download | | | | O(1000B) | (frequent 243 e.g. | | | | | during IP1.0 244 misdirecte | | | | | testing) 245 d EAPOL | | | | | 246 packets | | | | | 247 ---------------------------------------------------------------------------- 248 n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?) 249 | DATA_VO (uplink) | | | | 250 ---------------------------------------------------------------------------- 251 n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?) 252 | DATA_VO (downlink) | | | | 253 ---------------------------------------------------------------------------- 254 WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent 255 | | | | O(100B) | 256 ---------------------------------------------------------------------------- 257 WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent 258 messages | (downlink) | | | O(100B) | 259 | | | | | 260 ---------------------------------------------------------------------------- 261 n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?) 262 | HTC_RAW_STREAMS | | | | 263 | (uplink) | | | | 264 ---------------------------------------------------------------------------- 265 n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?) 266 | HTC_RAW_STREAMS | | | | 267 | (downlink) | | | | 268 ---------------------------------------------------------------------------- 269 diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window 270 | | | | | infrequent 271 ============================================================================ 272 */ 273 274 /* 275 * Map from service/endpoint to Copy Engine. 276 * This table is derived from the CE_PCI TABLE, above. 277 * It is passed to the Target at startup for use by firmware. 278 */ 279 static struct service_to_pipe target_service_to_ce_map_wlan[] = { 280 { 281 WMI_DATA_VO_SVC, 282 PIPEDIR_OUT, /* out = UL = host -> target */ 283 3, 284 }, 285 { 286 WMI_DATA_VO_SVC, 287 PIPEDIR_IN, /* in = DL = target -> host */ 288 2, 289 }, 290 { 291 WMI_DATA_BK_SVC, 292 PIPEDIR_OUT, /* out = UL = host -> target */ 293 3, 294 }, 295 { 296 WMI_DATA_BK_SVC, 297 PIPEDIR_IN, /* in = DL = target -> host */ 298 2, 299 }, 300 { 301 WMI_DATA_BE_SVC, 302 PIPEDIR_OUT, /* out = UL = host -> target */ 303 3, 304 }, 305 { 306 WMI_DATA_BE_SVC, 307 PIPEDIR_IN, /* in = DL = target -> host */ 308 2, 309 }, 310 { 311 WMI_DATA_VI_SVC, 312 PIPEDIR_OUT, /* out = UL = host -> target */ 313 3, 314 }, 315 { 316 WMI_DATA_VI_SVC, 317 PIPEDIR_IN, /* in = DL = target -> host */ 318 2, 319 }, 320 { 321 WMI_CONTROL_SVC, 322 PIPEDIR_OUT, /* out = UL = host -> target */ 323 3, 324 }, 325 { 326 WMI_CONTROL_SVC, 327 PIPEDIR_IN, /* in = DL = target -> host */ 328 2, 329 }, 330 { 331 HTC_CTRL_RSVD_SVC, 332 PIPEDIR_OUT, /* out = UL = host -> target */ 333 0, /* could be moved to 3 (share with WMI) */ 334 }, 335 { 336 HTC_CTRL_RSVD_SVC, 337 PIPEDIR_IN, /* in = DL = target -> host */ 338 2, 339 }, 340 { 341 HTC_RAW_STREAMS_SVC, /* not currently used */ 342 PIPEDIR_OUT, /* out = UL = host -> target */ 343 0, 344 }, 345 { 346 HTC_RAW_STREAMS_SVC, /* not currently used */ 347 PIPEDIR_IN, /* in = DL = target -> host */ 348 2, 349 }, 350 { 351 HTT_DATA_MSG_SVC, 352 PIPEDIR_OUT, /* out = UL = host -> target */ 353 4, 354 }, 355 { 356 HTT_DATA_MSG_SVC, 357 PIPEDIR_IN, /* in = DL = target -> host */ 358 1, 359 }, 360 { 361 WDI_IPA_TX_SVC, 362 PIPEDIR_OUT, /* in = DL = target -> host */ 363 5, 364 }, 365 #if defined(QCA_WIFI_3_0_ADRASTEA) 366 { 367 HTT_DATA2_MSG_SVC, 368 PIPEDIR_IN, /* in = DL = target -> host */ 369 9, 370 }, 371 { 372 HTT_DATA3_MSG_SVC, 373 PIPEDIR_IN, /* in = DL = target -> host */ 374 10, 375 }, 376 { 377 PACKET_LOG_SVC, 378 PIPEDIR_IN, /* in = DL = target -> host */ 379 11, 380 }, 381 #endif 382 /* (Additions here) */ 383 384 { /* Must be last */ 385 0, 386 0, 387 0, 388 }, 389 }; 390 391 /* PIPEDIR_OUT = HOST to Target */ 392 /* PIPEDIR_IN = TARGET to HOST */ 393 #if (defined(QCA_WIFI_QCA8074)) 394 static struct service_to_pipe target_service_to_ce_map_qca8074[] = { 395 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 396 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 397 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 398 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 399 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 400 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 401 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 402 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 403 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 404 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 405 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 406 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 407 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 408 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 409 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 410 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 411 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 412 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 413 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 414 /* (Additions here) */ 415 { 0, 0, 0, }, 416 }; 417 #else 418 static struct service_to_pipe target_service_to_ce_map_qca8074[] = { 419 }; 420 #endif 421 422 #if (defined(QCA_WIFI_QCA8074V2)) 423 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { 424 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 425 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 426 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 427 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 428 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 429 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 430 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 431 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 432 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 433 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 434 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 435 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 436 { WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9}, 437 { WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2}, 438 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 439 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 440 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 441 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 442 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 443 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 444 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 445 /* (Additions here) */ 446 { 0, 0, 0, }, 447 }; 448 #else 449 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = { 450 }; 451 #endif 452 453 #if (defined(QCA_WIFI_QCA6018)) 454 static struct service_to_pipe target_service_to_ce_map_qca6018[] = { 455 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 456 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 457 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 458 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 459 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 460 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 461 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 462 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 463 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 464 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 465 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 466 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 467 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 468 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 469 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 470 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 471 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 472 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 473 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 474 /* (Additions here) */ 475 { 0, 0, 0, }, 476 }; 477 #else 478 static struct service_to_pipe target_service_to_ce_map_qca6018[] = { 479 }; 480 #endif 481 482 #if (defined(QCA_WIFI_QCN9000)) 483 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = { 484 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 485 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 486 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 487 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 488 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 489 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 490 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 491 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 492 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 493 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 494 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 495 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, }, 496 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0}, 497 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 }, 498 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 499 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 500 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 501 /* (Additions here) */ 502 { 0, 0, 0, }, 503 }; 504 #else 505 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = { 506 }; 507 #endif 508 509 /* PIPEDIR_OUT = HOST to Target */ 510 /* PIPEDIR_IN = TARGET to HOST */ 511 #ifdef QCN7605_SUPPORT 512 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = { 513 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, }, 514 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 515 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, }, 516 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 517 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, }, 518 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 519 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, }, 520 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 521 { WMI_CONTROL_SVC, PIPEDIR_OUT, 0, }, 522 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 523 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 524 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 525 { HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, }, 526 { HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, }, 527 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 528 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 529 { HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, }, 530 #ifdef IPA_OFFLOAD 531 { WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, }, 532 #else 533 { HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, }, 534 #endif 535 { PACKET_LOG_SVC, PIPEDIR_IN, 7, }, 536 /* (Additions here) */ 537 { 0, 0, 0, }, 538 }; 539 #endif 540 541 #if (defined(QCA_WIFI_QCA6290)) 542 #ifdef QCA_6290_AP_MODE 543 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 544 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 545 { WMI_DATA_VO_SVC, PIPEDIR_IN , 2, }, 546 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 547 { WMI_DATA_BK_SVC, PIPEDIR_IN , 2, }, 548 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 549 { WMI_DATA_BE_SVC, PIPEDIR_IN , 2, }, 550 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 551 { WMI_DATA_VI_SVC, PIPEDIR_IN , 2, }, 552 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 553 { WMI_CONTROL_SVC, PIPEDIR_IN , 2, }, 554 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 555 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, }, 556 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 557 { HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, }, 558 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7}, 559 { WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2}, 560 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 561 /* (Additions here) */ 562 { 0, 0, 0, }, 563 }; 564 #else 565 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 566 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 567 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 568 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 569 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 570 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 571 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 572 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 573 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 574 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 575 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 576 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 577 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 578 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 579 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 580 /* (Additions here) */ 581 { 0, 0, 0, }, 582 }; 583 #endif 584 #else 585 static struct service_to_pipe target_service_to_ce_map_qca6290[] = { 586 }; 587 #endif 588 589 #if (defined(QCA_WIFI_QCA6390)) 590 static struct service_to_pipe target_service_to_ce_map_qca6390[] = { 591 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 592 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 593 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 594 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 595 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 596 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 597 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 598 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 599 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 600 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 601 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 602 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 603 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 604 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 605 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 606 /* (Additions here) */ 607 { 0, 0, 0, }, 608 }; 609 #else 610 static struct service_to_pipe target_service_to_ce_map_qca6390[] = { 611 }; 612 #endif 613 614 static struct service_to_pipe target_service_to_ce_map_qca6490[] = { 615 { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }, 616 { WMI_DATA_VO_SVC, PIPEDIR_IN, 2, }, 617 { WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, }, 618 { WMI_DATA_BK_SVC, PIPEDIR_IN, 2, }, 619 { WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, }, 620 { WMI_DATA_BE_SVC, PIPEDIR_IN, 2, }, 621 { WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, }, 622 { WMI_DATA_VI_SVC, PIPEDIR_IN, 2, }, 623 { WMI_CONTROL_SVC, PIPEDIR_OUT, 3, }, 624 { WMI_CONTROL_SVC, PIPEDIR_IN, 2, }, 625 { HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, }, 626 { HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, }, 627 { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, }, 628 { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, }, 629 { PACKET_LOG_SVC, PIPEDIR_IN, 5, }, 630 /* (Additions here) */ 631 { 0, 0, 0, }, 632 }; 633 634 static struct service_to_pipe target_service_to_ce_map_ar900b[] = { 635 { 636 WMI_DATA_VO_SVC, 637 PIPEDIR_OUT, /* out = UL = host -> target */ 638 3, 639 }, 640 { 641 WMI_DATA_VO_SVC, 642 PIPEDIR_IN, /* in = DL = target -> host */ 643 2, 644 }, 645 { 646 WMI_DATA_BK_SVC, 647 PIPEDIR_OUT, /* out = UL = host -> target */ 648 3, 649 }, 650 { 651 WMI_DATA_BK_SVC, 652 PIPEDIR_IN, /* in = DL = target -> host */ 653 2, 654 }, 655 { 656 WMI_DATA_BE_SVC, 657 PIPEDIR_OUT, /* out = UL = host -> target */ 658 3, 659 }, 660 { 661 WMI_DATA_BE_SVC, 662 PIPEDIR_IN, /* in = DL = target -> host */ 663 2, 664 }, 665 { 666 WMI_DATA_VI_SVC, 667 PIPEDIR_OUT, /* out = UL = host -> target */ 668 3, 669 }, 670 { 671 WMI_DATA_VI_SVC, 672 PIPEDIR_IN, /* in = DL = target -> host */ 673 2, 674 }, 675 { 676 WMI_CONTROL_SVC, 677 PIPEDIR_OUT, /* out = UL = host -> target */ 678 3, 679 }, 680 { 681 WMI_CONTROL_SVC, 682 PIPEDIR_IN, /* in = DL = target -> host */ 683 2, 684 }, 685 { 686 HTC_CTRL_RSVD_SVC, 687 PIPEDIR_OUT, /* out = UL = host -> target */ 688 0, /* could be moved to 3 (share with WMI) */ 689 }, 690 { 691 HTC_CTRL_RSVD_SVC, 692 PIPEDIR_IN, /* in = DL = target -> host */ 693 1, 694 }, 695 { 696 HTC_RAW_STREAMS_SVC, /* not currently used */ 697 PIPEDIR_OUT, /* out = UL = host -> target */ 698 0, 699 }, 700 { 701 HTC_RAW_STREAMS_SVC, /* not currently used */ 702 PIPEDIR_IN, /* in = DL = target -> host */ 703 1, 704 }, 705 { 706 HTT_DATA_MSG_SVC, 707 PIPEDIR_OUT, /* out = UL = host -> target */ 708 4, 709 }, 710 #ifdef WLAN_FEATURE_FASTPATH 711 { 712 HTT_DATA_MSG_SVC, 713 PIPEDIR_IN, /* in = DL = target -> host */ 714 5, 715 }, 716 #else /* WLAN_FEATURE_FASTPATH */ 717 { 718 HTT_DATA_MSG_SVC, 719 PIPEDIR_IN, /* in = DL = target -> host */ 720 1, 721 }, 722 #endif /* WLAN_FEATURE_FASTPATH */ 723 724 /* (Additions here) */ 725 726 { /* Must be last */ 727 0, 728 0, 729 0, 730 }, 731 }; 732 733 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map; 734 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map); 735 736 #ifdef WLAN_FEATURE_EPPING 737 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = { 738 {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 739 {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 740 {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ 741 {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ 742 {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 743 {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 744 {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 745 {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 746 {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */ 747 {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 748 {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ 749 {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 750 {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */ 751 {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */ 752 {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */ 753 {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */ 754 {0, 0, 0,}, /* Must be last */ 755 }; 756 757 void hif_select_epping_service_to_pipe_map(struct service_to_pipe 758 **tgt_svc_map_to_use, 759 uint32_t *sz_tgt_svc_map_to_use) 760 { 761 *tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping; 762 *sz_tgt_svc_map_to_use = 763 sizeof(target_service_to_ce_map_wlan_epping); 764 } 765 #endif 766 767 #ifdef QCN7605_SUPPORT 768 static inline 769 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use, 770 uint32_t *sz_tgt_svc_map_to_use) 771 { 772 *tgt_svc_map_to_use = target_service_to_ce_map_qcn7605; 773 *sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605); 774 } 775 #else 776 static inline 777 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use, 778 uint32_t *sz_tgt_svc_map_to_use) 779 { 780 HIF_ERROR("%s: QCN7605 not supported", __func__); 781 } 782 #endif 783 784 static void hif_select_service_to_pipe_map(struct hif_softc *scn, 785 struct service_to_pipe **tgt_svc_map_to_use, 786 uint32_t *sz_tgt_svc_map_to_use) 787 { 788 uint32_t mode = hif_get_conparam(scn); 789 struct hif_target_info *tgt_info = &scn->target_info; 790 791 if (QDF_IS_EPPING_ENABLED(mode)) { 792 hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use, 793 sz_tgt_svc_map_to_use); 794 } else { 795 switch (tgt_info->target_type) { 796 default: 797 *tgt_svc_map_to_use = target_service_to_ce_map_wlan; 798 *sz_tgt_svc_map_to_use = 799 sizeof(target_service_to_ce_map_wlan); 800 break; 801 case TARGET_TYPE_QCN7605: 802 hif_select_ce_map_qcn7605(tgt_svc_map_to_use, 803 sz_tgt_svc_map_to_use); 804 break; 805 case TARGET_TYPE_AR900B: 806 case TARGET_TYPE_QCA9984: 807 case TARGET_TYPE_IPQ4019: 808 case TARGET_TYPE_QCA9888: 809 case TARGET_TYPE_AR9888: 810 case TARGET_TYPE_AR9888V2: 811 *tgt_svc_map_to_use = target_service_to_ce_map_ar900b; 812 *sz_tgt_svc_map_to_use = 813 sizeof(target_service_to_ce_map_ar900b); 814 break; 815 case TARGET_TYPE_QCA6290: 816 *tgt_svc_map_to_use = target_service_to_ce_map_qca6290; 817 *sz_tgt_svc_map_to_use = 818 sizeof(target_service_to_ce_map_qca6290); 819 break; 820 case TARGET_TYPE_QCA6390: 821 *tgt_svc_map_to_use = target_service_to_ce_map_qca6390; 822 *sz_tgt_svc_map_to_use = 823 sizeof(target_service_to_ce_map_qca6390); 824 break; 825 case TARGET_TYPE_QCA6490: 826 *tgt_svc_map_to_use = target_service_to_ce_map_qca6490; 827 *sz_tgt_svc_map_to_use = 828 sizeof(target_service_to_ce_map_qca6490); 829 break; 830 case TARGET_TYPE_QCA8074: 831 *tgt_svc_map_to_use = target_service_to_ce_map_qca8074; 832 *sz_tgt_svc_map_to_use = 833 sizeof(target_service_to_ce_map_qca8074); 834 break; 835 case TARGET_TYPE_QCA8074V2: 836 *tgt_svc_map_to_use = 837 target_service_to_ce_map_qca8074_v2; 838 *sz_tgt_svc_map_to_use = 839 sizeof(target_service_to_ce_map_qca8074_v2); 840 break; 841 case TARGET_TYPE_QCA6018: 842 *tgt_svc_map_to_use = 843 target_service_to_ce_map_qca6018; 844 *sz_tgt_svc_map_to_use = 845 sizeof(target_service_to_ce_map_qca6018); 846 break; 847 case TARGET_TYPE_QCN9000: 848 *tgt_svc_map_to_use = 849 target_service_to_ce_map_qcn9000; 850 *sz_tgt_svc_map_to_use = 851 sizeof(target_service_to_ce_map_qcn9000); 852 break; 853 } 854 } 855 } 856 857 /** 858 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly 859 * @ce_state : pointer to the state context of the CE 860 * 861 * Description: 862 * Sets htt_rx_data attribute of the state structure if the 863 * CE serves one of the HTT DATA services. 864 * 865 * Return: 866 * false (attribute set to false) 867 * true (attribute set to true); 868 */ 869 static bool ce_mark_datapath(struct CE_state *ce_state) 870 { 871 struct service_to_pipe *svc_map; 872 uint32_t map_sz, map_len; 873 int i; 874 bool rc = false; 875 876 if (ce_state) { 877 hif_select_service_to_pipe_map(ce_state->scn, &svc_map, 878 &map_sz); 879 880 map_len = map_sz / sizeof(struct service_to_pipe); 881 for (i = 0; i < map_len; i++) { 882 if ((svc_map[i].pipenum == ce_state->id) && 883 ((svc_map[i].service_id == HTT_DATA_MSG_SVC) || 884 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) || 885 (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) { 886 /* HTT CEs are unidirectional */ 887 if (svc_map[i].pipedir == PIPEDIR_IN) 888 ce_state->htt_rx_data = true; 889 else 890 ce_state->htt_tx_data = true; 891 rc = true; 892 } 893 } 894 } 895 return rc; 896 } 897 898 /** 899 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes 900 * @ce_id: ce in question 901 * @ring: ring state being examined 902 * @type: "src_ring" or "dest_ring" string for identifying the ring 903 * 904 * Warns on non-zero index values. 905 * Causes a kernel panic if the ring is not empty durring initialization. 906 */ 907 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring, 908 char *type) 909 { 910 if (ring->write_index != 0 || ring->sw_index != 0) 911 HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d", 912 ce_id, type, ring->sw_index, ring->write_index); 913 if (ring->write_index != ring->sw_index) 914 QDF_BUG(0); 915 } 916 917 #ifdef IPA_OFFLOAD 918 /** 919 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring 920 * @scn: softc instance 921 * @ce_id: ce in question 922 * @base_addr: pointer to copyengine ring base address 923 * @ce_ring: copyengine instance 924 * @nentries: number of entries should be allocated 925 * @desc_size: ce desc size 926 * 927 * Return: QDF_STATUS_SUCCESS - for success 928 */ 929 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, 930 qdf_dma_addr_t *base_addr, 931 struct CE_ring_state *ce_ring, 932 unsigned int nentries, uint32_t desc_size) 933 { 934 if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) && 935 !ce_srng_based(scn)) { 936 if (!scn->ipa_ce_ring) { 937 scn->ipa_ce_ring = qdf_mem_shared_mem_alloc( 938 scn->qdf_dev, 939 nentries * desc_size + CE_DESC_RING_ALIGN); 940 if (!scn->ipa_ce_ring) { 941 HIF_ERROR( 942 "%s: Failed to allocate memory for IPA ce ring", 943 __func__); 944 return QDF_STATUS_E_NOMEM; 945 } 946 } 947 *base_addr = qdf_mem_get_dma_addr(scn->qdf_dev, 948 &scn->ipa_ce_ring->mem_info); 949 ce_ring->base_addr_owner_space_unaligned = 950 scn->ipa_ce_ring->vaddr; 951 } else { 952 ce_ring->base_addr_owner_space_unaligned = 953 qdf_mem_alloc_consistent(scn->qdf_dev, 954 scn->qdf_dev->dev, 955 (nentries * desc_size + 956 CE_DESC_RING_ALIGN), 957 base_addr); 958 if (!ce_ring->base_addr_owner_space_unaligned) { 959 HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", 960 __func__, CE_id); 961 return QDF_STATUS_E_NOMEM; 962 } 963 } 964 return QDF_STATUS_SUCCESS; 965 } 966 967 /** 968 * ce_free_desc_ring() - Frees copyengine descriptor ring 969 * @scn: softc instance 970 * @ce_id: ce in question 971 * @ce_ring: copyengine instance 972 * @desc_size: ce desc size 973 * 974 * Return: None 975 */ 976 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, 977 struct CE_ring_state *ce_ring, uint32_t desc_size) 978 { 979 if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) && 980 !ce_srng_based(scn)) { 981 if (scn->ipa_ce_ring) { 982 qdf_mem_shared_mem_free(scn->qdf_dev, 983 scn->ipa_ce_ring); 984 scn->ipa_ce_ring = NULL; 985 } 986 ce_ring->base_addr_owner_space_unaligned = NULL; 987 } else { 988 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 989 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, 990 ce_ring->base_addr_owner_space_unaligned, 991 ce_ring->base_addr_CE_space, 0); 992 ce_ring->base_addr_owner_space_unaligned = NULL; 993 } 994 } 995 #else 996 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id, 997 qdf_dma_addr_t *base_addr, 998 struct CE_ring_state *ce_ring, 999 unsigned int nentries, uint32_t desc_size) 1000 { 1001 ce_ring->base_addr_owner_space_unaligned = 1002 qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 1003 (nentries * desc_size + 1004 CE_DESC_RING_ALIGN), base_addr); 1005 if (!ce_ring->base_addr_owner_space_unaligned) { 1006 HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u", 1007 __func__, CE_id); 1008 return QDF_STATUS_E_NOMEM; 1009 } 1010 return QDF_STATUS_SUCCESS; 1011 } 1012 1013 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id, 1014 struct CE_ring_state *ce_ring, uint32_t desc_size) 1015 { 1016 qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev, 1017 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN, 1018 ce_ring->base_addr_owner_space_unaligned, 1019 ce_ring->base_addr_CE_space, 0); 1020 ce_ring->base_addr_owner_space_unaligned = NULL; 1021 } 1022 #endif /* IPA_OFFLOAD */ 1023 1024 /* 1025 * TODO: Need to explore the possibility of having this as part of a 1026 * target context instead of a global array. 1027 */ 1028 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void); 1029 1030 void ce_service_register_module(enum ce_target_type target_type, 1031 struct ce_ops* (*ce_attach)(void)) 1032 { 1033 if (target_type < CE_MAX_TARGET_TYPE) 1034 ce_attach_register[target_type] = ce_attach; 1035 } 1036 1037 qdf_export_symbol(ce_service_register_module); 1038 1039 /** 1040 * ce_srng_based() - Does this target use srng 1041 * @ce_state : pointer to the state context of the CE 1042 * 1043 * Description: 1044 * returns true if the target is SRNG based 1045 * 1046 * Return: 1047 * false (attribute set to false) 1048 * true (attribute set to true); 1049 */ 1050 bool ce_srng_based(struct hif_softc *scn) 1051 { 1052 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 1053 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 1054 1055 switch (tgt_info->target_type) { 1056 case TARGET_TYPE_QCA8074: 1057 case TARGET_TYPE_QCA8074V2: 1058 case TARGET_TYPE_QCA6290: 1059 case TARGET_TYPE_QCA6390: 1060 case TARGET_TYPE_QCA6490: 1061 case TARGET_TYPE_QCA6018: 1062 case TARGET_TYPE_QCN9000: 1063 return true; 1064 default: 1065 return false; 1066 } 1067 return false; 1068 } 1069 qdf_export_symbol(ce_srng_based); 1070 1071 #ifdef QCA_WIFI_SUPPORT_SRNG 1072 static struct ce_ops *ce_services_attach(struct hif_softc *scn) 1073 { 1074 struct ce_ops *ops = NULL; 1075 1076 if (ce_srng_based(scn)) { 1077 if (ce_attach_register[CE_SVC_SRNG]) 1078 ops = ce_attach_register[CE_SVC_SRNG](); 1079 } else if (ce_attach_register[CE_SVC_LEGACY]) { 1080 ops = ce_attach_register[CE_SVC_LEGACY](); 1081 } 1082 1083 return ops; 1084 } 1085 1086 1087 #else /* QCA_LITHIUM */ 1088 static struct ce_ops *ce_services_attach(struct hif_softc *scn) 1089 { 1090 if (ce_attach_register[CE_SVC_LEGACY]) 1091 return ce_attach_register[CE_SVC_LEGACY](); 1092 1093 return NULL; 1094 } 1095 #endif /* QCA_LITHIUM */ 1096 1097 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn, 1098 struct pld_shadow_reg_v2_cfg **shadow_config, 1099 int *num_shadow_registers_configured) { 1100 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1101 1102 return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg( 1103 scn, shadow_config, num_shadow_registers_configured); 1104 } 1105 1106 static inline uint32_t ce_get_desc_size(struct hif_softc *scn, 1107 uint8_t ring_type) 1108 { 1109 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1110 1111 return hif_state->ce_services->ce_get_desc_size(ring_type); 1112 } 1113 1114 1115 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state, 1116 uint8_t ring_type, uint32_t nentries) 1117 { 1118 uint32_t ce_nbytes; 1119 char *ptr; 1120 qdf_dma_addr_t base_addr; 1121 struct CE_ring_state *ce_ring; 1122 uint32_t desc_size; 1123 struct hif_softc *scn = CE_state->scn; 1124 1125 ce_nbytes = sizeof(struct CE_ring_state) 1126 + (nentries * sizeof(void *)); 1127 ptr = qdf_mem_malloc(ce_nbytes); 1128 if (!ptr) 1129 return NULL; 1130 1131 ce_ring = (struct CE_ring_state *)ptr; 1132 ptr += sizeof(struct CE_ring_state); 1133 ce_ring->nentries = nentries; 1134 ce_ring->nentries_mask = nentries - 1; 1135 1136 ce_ring->low_water_mark_nentries = 0; 1137 ce_ring->high_water_mark_nentries = nentries; 1138 ce_ring->per_transfer_context = (void **)ptr; 1139 1140 desc_size = ce_get_desc_size(scn, ring_type); 1141 1142 /* Legacy platforms that do not support cache 1143 * coherent DMA are unsupported 1144 */ 1145 if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr, 1146 ce_ring, nentries, 1147 desc_size) != 1148 QDF_STATUS_SUCCESS) { 1149 HIF_ERROR("%s: ring has no DMA mem", 1150 __func__); 1151 qdf_mem_free(ce_ring); 1152 return NULL; 1153 } 1154 ce_ring->base_addr_CE_space_unaligned = base_addr; 1155 1156 /* Correctly initialize memory to 0 to 1157 * prevent garbage data crashing system 1158 * when download firmware 1159 */ 1160 qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned, 1161 nentries * desc_size + 1162 CE_DESC_RING_ALIGN); 1163 1164 if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) { 1165 1166 ce_ring->base_addr_CE_space = 1167 (ce_ring->base_addr_CE_space_unaligned + 1168 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1); 1169 1170 ce_ring->base_addr_owner_space = (void *) 1171 (((size_t) ce_ring->base_addr_owner_space_unaligned + 1172 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1)); 1173 } else { 1174 ce_ring->base_addr_CE_space = 1175 ce_ring->base_addr_CE_space_unaligned; 1176 ce_ring->base_addr_owner_space = 1177 ce_ring->base_addr_owner_space_unaligned; 1178 } 1179 1180 return ce_ring; 1181 } 1182 1183 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type, 1184 uint32_t ce_id, struct CE_ring_state *ring, 1185 struct CE_attr *attr) 1186 { 1187 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1188 1189 return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, 1190 ring, attr); 1191 } 1192 1193 int hif_ce_bus_early_suspend(struct hif_softc *scn) 1194 { 1195 uint8_t ul_pipe, dl_pipe; 1196 int ce_id, status, ul_is_polled, dl_is_polled; 1197 struct CE_state *ce_state; 1198 1199 status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC, 1200 &ul_pipe, &dl_pipe, 1201 &ul_is_polled, &dl_is_polled); 1202 if (status) { 1203 HIF_ERROR("%s: pipe_mapping failure", __func__); 1204 return status; 1205 } 1206 1207 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1208 if (ce_id == ul_pipe) 1209 continue; 1210 if (ce_id == dl_pipe) 1211 continue; 1212 1213 ce_state = scn->ce_id_to_state[ce_id]; 1214 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1215 if (ce_state->state == CE_RUNNING) 1216 ce_state->state = CE_PAUSED; 1217 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1218 } 1219 1220 return status; 1221 } 1222 1223 int hif_ce_bus_late_resume(struct hif_softc *scn) 1224 { 1225 int ce_id; 1226 struct CE_state *ce_state; 1227 int write_index = 0; 1228 bool index_updated; 1229 1230 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 1231 ce_state = scn->ce_id_to_state[ce_id]; 1232 qdf_spin_lock_bh(&ce_state->ce_index_lock); 1233 if (ce_state->state == CE_PENDING) { 1234 write_index = ce_state->src_ring->write_index; 1235 CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, 1236 write_index); 1237 ce_state->state = CE_RUNNING; 1238 index_updated = true; 1239 } else { 1240 index_updated = false; 1241 } 1242 1243 if (ce_state->state == CE_PAUSED) 1244 ce_state->state = CE_RUNNING; 1245 qdf_spin_unlock_bh(&ce_state->ce_index_lock); 1246 1247 if (index_updated) 1248 hif_record_ce_desc_event(scn, ce_id, 1249 RESUME_WRITE_INDEX_UPDATE, 1250 NULL, NULL, write_index, 0); 1251 } 1252 1253 return 0; 1254 } 1255 1256 /** 1257 * ce_oom_recovery() - try to recover rx ce from oom condition 1258 * @context: CE_state of the CE with oom rx ring 1259 * 1260 * the executing work Will continue to be rescheduled until 1261 * at least 1 descriptor is successfully posted to the rx ring. 1262 * 1263 * return: none 1264 */ 1265 static void ce_oom_recovery(void *context) 1266 { 1267 struct CE_state *ce_state = context; 1268 struct hif_softc *scn = ce_state->scn; 1269 struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn); 1270 struct HIF_CE_pipe_info *pipe_info = 1271 &ce_softc->pipe_info[ce_state->id]; 1272 1273 hif_post_recv_buffers_for_pipe(pipe_info); 1274 } 1275 1276 #ifdef HIF_CE_DEBUG_DATA_BUF 1277 /** 1278 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by 1279 * the CE descriptors. 1280 * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE 1281 * @scn: hif scn handle 1282 * ce_id: Copy Engine Id 1283 * 1284 * Return: QDF_STATUS 1285 */ 1286 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) 1287 { 1288 struct hif_ce_desc_event *event = NULL; 1289 struct hif_ce_desc_event *hist_ev = NULL; 1290 uint32_t index = 0; 1291 1292 hist_ev = 1293 (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; 1294 1295 if (!hist_ev) 1296 return QDF_STATUS_E_NOMEM; 1297 1298 scn->hif_ce_desc_hist.data_enable[ce_id] = true; 1299 for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { 1300 event = &hist_ev[index]; 1301 event->data = 1302 (uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE); 1303 if (!event->data) { 1304 hif_err_rl("ce debug data alloc failed"); 1305 return QDF_STATUS_E_NOMEM; 1306 } 1307 } 1308 return QDF_STATUS_SUCCESS; 1309 } 1310 1311 /** 1312 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by 1313 * the CE descriptors. 1314 * @scn: hif scn handle 1315 * ce_id: Copy Engine Id 1316 * 1317 * Return: 1318 */ 1319 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) 1320 { 1321 struct hif_ce_desc_event *event = NULL; 1322 struct hif_ce_desc_event *hist_ev = NULL; 1323 uint32_t index = 0; 1324 1325 hist_ev = 1326 (struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id]; 1327 1328 if (!hist_ev) 1329 return; 1330 1331 for (index = 0; index < HIF_CE_HISTORY_MAX; index++) { 1332 event = &hist_ev[index]; 1333 if (event->data) 1334 qdf_mem_free(event->data); 1335 event->data = NULL; 1336 event = NULL; 1337 } 1338 1339 } 1340 #endif /* HIF_CE_DEBUG_DATA_BUF */ 1341 1342 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF 1343 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 1344 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX]; 1345 1346 /** 1347 * alloc_mem_ce_debug_history() - Allocate CE descriptor history 1348 * @scn: hif scn handle 1349 * @ce_id: Copy Engine Id 1350 * @src_nentries: source ce ring entries 1351 * Return: QDF_STATUS 1352 */ 1353 static QDF_STATUS 1354 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id, 1355 uint32_t src_nentries) 1356 { 1357 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 1358 1359 ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id]; 1360 ce_hist->enable[ce_id] = 1; 1361 1362 if (src_nentries) 1363 alloc_mem_ce_debug_hist_data(scn, ce_id); 1364 else 1365 ce_hist->data_enable[ce_id] = false; 1366 1367 return QDF_STATUS_SUCCESS; 1368 } 1369 1370 /** 1371 * free_mem_ce_debug_history() - Free CE descriptor history 1372 * @scn: hif scn handle 1373 * @ce_id: Copy Engine Id 1374 * 1375 * Return: None 1376 */ 1377 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id) 1378 { 1379 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 1380 1381 ce_hist->enable[ce_id] = 0; 1382 if (ce_hist->data_enable[ce_id]) { 1383 ce_hist->data_enable[ce_id] = false; 1384 free_mem_ce_debug_hist_data(scn, ce_id); 1385 } 1386 ce_hist->hist_ev[ce_id] = NULL; 1387 } 1388 #else 1389 static inline QDF_STATUS 1390 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 1391 uint32_t src_nentries) 1392 { 1393 return QDF_STATUS_SUCCESS; 1394 } 1395 1396 static inline void 1397 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } 1398 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */ 1399 #else 1400 #if defined(HIF_CE_DEBUG_DATA_BUF) 1401 1402 static QDF_STATUS 1403 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 1404 uint32_t src_nentries) 1405 { 1406 scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *) 1407 qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event)); 1408 1409 if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) { 1410 scn->hif_ce_desc_hist.enable[CE_id] = 0; 1411 return QDF_STATUS_E_NOMEM; 1412 } else { 1413 scn->hif_ce_desc_hist.enable[CE_id] = 1; 1414 return QDF_STATUS_SUCCESS; 1415 } 1416 } 1417 1418 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) 1419 { 1420 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 1421 struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id]; 1422 1423 if (!hist_ev) 1424 return; 1425 1426 if (ce_hist->data_enable[CE_id]) { 1427 ce_hist->data_enable[CE_id] = false; 1428 free_mem_ce_debug_hist_data(scn, CE_id); 1429 } 1430 1431 ce_hist->enable[CE_id] = 0; 1432 qdf_mem_free(ce_hist->hist_ev[CE_id]); 1433 ce_hist->hist_ev[CE_id] = NULL; 1434 } 1435 1436 #else 1437 1438 static inline QDF_STATUS 1439 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id, 1440 uint32_t src_nentries) 1441 { 1442 return QDF_STATUS_SUCCESS; 1443 } 1444 1445 static inline void 1446 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { } 1447 #endif /* HIF_CE_DEBUG_DATA_BUF */ 1448 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */ 1449 1450 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) 1451 /** 1452 * reset_ce_debug_history() - reset the index and ce id used for dumping the 1453 * CE records on the console using sysfs. 1454 * @scn: hif scn handle 1455 * 1456 * Return: 1457 */ 1458 static inline void reset_ce_debug_history(struct hif_softc *scn) 1459 { 1460 struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist; 1461 /* Initialise the CE debug history sysfs interface inputs ce_id and 1462 * index. Disable data storing 1463 */ 1464 ce_hist->hist_index = 0; 1465 ce_hist->hist_id = 0; 1466 } 1467 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 1468 static inline void reset_ce_debug_history(struct hif_softc *scn) { } 1469 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */ 1470 1471 void ce_enable_polling(void *cestate) 1472 { 1473 struct CE_state *CE_state = (struct CE_state *)cestate; 1474 1475 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) 1476 CE_state->timer_inited = true; 1477 } 1478 1479 void ce_disable_polling(void *cestate) 1480 { 1481 struct CE_state *CE_state = (struct CE_state *)cestate; 1482 1483 if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL) 1484 CE_state->timer_inited = false; 1485 } 1486 1487 /* 1488 * Initialize a Copy Engine based on caller-supplied attributes. 1489 * This may be called once to initialize both source and destination 1490 * rings or it may be called twice for separate source and destination 1491 * initialization. It may be that only one side or the other is 1492 * initialized by software/firmware. 1493 * 1494 * This should be called durring the initialization sequence before 1495 * interupts are enabled, so we don't have to worry about thread safety. 1496 */ 1497 struct CE_handle *ce_init(struct hif_softc *scn, 1498 unsigned int CE_id, struct CE_attr *attr) 1499 { 1500 struct CE_state *CE_state; 1501 uint32_t ctrl_addr; 1502 unsigned int nentries; 1503 bool malloc_CE_state = false; 1504 bool malloc_src_ring = false; 1505 int status; 1506 1507 QDF_ASSERT(CE_id < scn->ce_count); 1508 ctrl_addr = CE_BASE_ADDRESS(CE_id); 1509 CE_state = scn->ce_id_to_state[CE_id]; 1510 1511 if (!CE_state) { 1512 CE_state = 1513 (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state)); 1514 if (!CE_state) 1515 return NULL; 1516 1517 malloc_CE_state = true; 1518 qdf_spinlock_create(&CE_state->ce_index_lock); 1519 1520 CE_state->id = CE_id; 1521 CE_state->ctrl_addr = ctrl_addr; 1522 CE_state->state = CE_RUNNING; 1523 CE_state->attr_flags = attr->flags; 1524 } 1525 CE_state->scn = scn; 1526 CE_state->service = ce_engine_service_reg; 1527 1528 qdf_atomic_init(&CE_state->rx_pending); 1529 if (!attr) { 1530 /* Already initialized; caller wants the handle */ 1531 return (struct CE_handle *)CE_state; 1532 } 1533 1534 if (CE_state->src_sz_max) 1535 QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max); 1536 else 1537 CE_state->src_sz_max = attr->src_sz_max; 1538 1539 ce_init_ce_desc_event_log(scn, CE_id, 1540 attr->src_nentries + attr->dest_nentries); 1541 1542 /* source ring setup */ 1543 nentries = attr->src_nentries; 1544 if (nentries) { 1545 struct CE_ring_state *src_ring; 1546 1547 nentries = roundup_pwr2(nentries); 1548 if (CE_state->src_ring) { 1549 QDF_ASSERT(CE_state->src_ring->nentries == nentries); 1550 } else { 1551 src_ring = CE_state->src_ring = 1552 ce_alloc_ring_state(CE_state, 1553 CE_RING_SRC, 1554 nentries); 1555 if (!src_ring) { 1556 /* cannot allocate src ring. If the 1557 * CE_state is allocated locally free 1558 * CE_State and return error. 1559 */ 1560 HIF_ERROR("%s: src ring has no mem", __func__); 1561 if (malloc_CE_state) { 1562 /* allocated CE_state locally */ 1563 qdf_mem_free(CE_state); 1564 malloc_CE_state = false; 1565 } 1566 return NULL; 1567 } 1568 /* we can allocate src ring. Mark that the src ring is 1569 * allocated locally 1570 */ 1571 malloc_src_ring = true; 1572 1573 /* 1574 * Also allocate a shadow src ring in 1575 * regular mem to use for faster access. 1576 */ 1577 src_ring->shadow_base_unaligned = 1578 qdf_mem_malloc(nentries * 1579 sizeof(struct CE_src_desc) + 1580 CE_DESC_RING_ALIGN); 1581 if (!src_ring->shadow_base_unaligned) 1582 goto error_no_dma_mem; 1583 1584 src_ring->shadow_base = (struct CE_src_desc *) 1585 (((size_t) src_ring->shadow_base_unaligned + 1586 CE_DESC_RING_ALIGN - 1) & 1587 ~(CE_DESC_RING_ALIGN - 1)); 1588 1589 status = ce_ring_setup(scn, CE_RING_SRC, CE_id, 1590 src_ring, attr); 1591 if (status < 0) 1592 goto error_target_access; 1593 1594 ce_ring_test_initial_indexes(CE_id, src_ring, 1595 "src_ring"); 1596 } 1597 } 1598 1599 /* destination ring setup */ 1600 nentries = attr->dest_nentries; 1601 if (nentries) { 1602 struct CE_ring_state *dest_ring; 1603 1604 nentries = roundup_pwr2(nentries); 1605 if (CE_state->dest_ring) { 1606 QDF_ASSERT(CE_state->dest_ring->nentries == nentries); 1607 } else { 1608 dest_ring = CE_state->dest_ring = 1609 ce_alloc_ring_state(CE_state, 1610 CE_RING_DEST, 1611 nentries); 1612 if (!dest_ring) { 1613 /* cannot allocate dst ring. If the CE_state 1614 * or src ring is allocated locally free 1615 * CE_State and src ring and return error. 1616 */ 1617 HIF_ERROR("%s: dest ring has no mem", 1618 __func__); 1619 goto error_no_dma_mem; 1620 } 1621 1622 status = ce_ring_setup(scn, CE_RING_DEST, CE_id, 1623 dest_ring, attr); 1624 if (status < 0) 1625 goto error_target_access; 1626 1627 ce_ring_test_initial_indexes(CE_id, dest_ring, 1628 "dest_ring"); 1629 1630 /* For srng based target, init status ring here */ 1631 if (ce_srng_based(CE_state->scn)) { 1632 CE_state->status_ring = 1633 ce_alloc_ring_state(CE_state, 1634 CE_RING_STATUS, 1635 nentries); 1636 if (!CE_state->status_ring) { 1637 /*Allocation failed. Cleanup*/ 1638 qdf_mem_free(CE_state->dest_ring); 1639 if (malloc_src_ring) { 1640 qdf_mem_free 1641 (CE_state->src_ring); 1642 CE_state->src_ring = NULL; 1643 malloc_src_ring = false; 1644 } 1645 if (malloc_CE_state) { 1646 /* allocated CE_state locally */ 1647 scn->ce_id_to_state[CE_id] = 1648 NULL; 1649 qdf_mem_free(CE_state); 1650 malloc_CE_state = false; 1651 } 1652 1653 return NULL; 1654 } 1655 1656 status = ce_ring_setup(scn, CE_RING_STATUS, 1657 CE_id, CE_state->status_ring, 1658 attr); 1659 if (status < 0) 1660 goto error_target_access; 1661 1662 } 1663 1664 /* epping */ 1665 /* poll timer */ 1666 if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) { 1667 qdf_timer_init(scn->qdf_dev, 1668 &CE_state->poll_timer, 1669 ce_poll_timeout, 1670 CE_state, 1671 QDF_TIMER_TYPE_WAKE_APPS); 1672 ce_enable_polling(CE_state); 1673 qdf_timer_mod(&CE_state->poll_timer, 1674 CE_POLL_TIMEOUT); 1675 } 1676 } 1677 } 1678 1679 if (!ce_srng_based(scn)) { 1680 /* Enable CE error interrupts */ 1681 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 1682 goto error_target_access; 1683 CE_ERROR_INTR_ENABLE(scn, ctrl_addr); 1684 if (Q_TARGET_ACCESS_END(scn) < 0) 1685 goto error_target_access; 1686 } 1687 1688 qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work, 1689 ce_oom_recovery, CE_state); 1690 1691 /* update the htt_data attribute */ 1692 ce_mark_datapath(CE_state); 1693 scn->ce_id_to_state[CE_id] = CE_state; 1694 1695 alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries); 1696 1697 return (struct CE_handle *)CE_state; 1698 1699 error_target_access: 1700 error_no_dma_mem: 1701 ce_fini((struct CE_handle *)CE_state); 1702 return NULL; 1703 } 1704 1705 /** 1706 * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs 1707 * @hif_ctx: HIF Context 1708 * 1709 * API to check if polling is enabled on all CEs. Returns true when polling 1710 * is enabled on all CEs. 1711 * 1712 * Return: bool 1713 */ 1714 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx) 1715 { 1716 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1717 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1718 struct CE_attr *attr; 1719 int id; 1720 1721 for (id = 0; id < scn->ce_count; id++) { 1722 attr = &hif_state->host_ce_config[id]; 1723 if (attr && (attr->dest_nentries) && 1724 !(attr->flags & CE_ATTR_ENABLE_POLL)) 1725 return false; 1726 } 1727 return true; 1728 } 1729 qdf_export_symbol(hif_is_polled_mode_enabled); 1730 1731 #ifdef WLAN_FEATURE_FASTPATH 1732 /** 1733 * hif_enable_fastpath() Update that we have enabled fastpath mode 1734 * @hif_ctx: HIF context 1735 * 1736 * For use in data path 1737 * 1738 * Retrun: void 1739 */ 1740 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx) 1741 { 1742 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1743 1744 if (ce_srng_based(scn)) { 1745 HIF_INFO("%s, srng rings do not support fastpath", __func__); 1746 return; 1747 } 1748 HIF_DBG("%s, Enabling fastpath mode", __func__); 1749 scn->fastpath_mode_on = true; 1750 } 1751 1752 /** 1753 * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled 1754 * @hif_ctx: HIF Context 1755 * 1756 * For use in data path to skip HTC 1757 * 1758 * Return: bool 1759 */ 1760 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx) 1761 { 1762 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1763 1764 return scn->fastpath_mode_on; 1765 } 1766 1767 /** 1768 * hif_get_ce_handle - API to get CE handle for FastPath mode 1769 * @hif_ctx: HIF Context 1770 * @id: CopyEngine Id 1771 * 1772 * API to return CE handle for fastpath mode 1773 * 1774 * Return: void 1775 */ 1776 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id) 1777 { 1778 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1779 1780 return scn->ce_id_to_state[id]; 1781 } 1782 qdf_export_symbol(hif_get_ce_handle); 1783 1784 /** 1785 * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup. 1786 * No processing is required inside this function. 1787 * @ce_hdl: Cope engine handle 1788 * Using an assert, this function makes sure that, 1789 * the TX CE has been processed completely. 1790 * 1791 * This is called while dismantling CE structures. No other thread 1792 * should be using these structures while dismantling is occurring 1793 * therfore no locking is needed. 1794 * 1795 * Return: none 1796 */ 1797 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl) 1798 { 1799 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 1800 struct CE_ring_state *src_ring = ce_state->src_ring; 1801 struct hif_softc *sc = ce_state->scn; 1802 uint32_t sw_index, write_index; 1803 1804 if (hif_is_nss_wifi_enabled(sc)) 1805 return; 1806 1807 if (sc->fastpath_mode_on && ce_state->htt_tx_data) { 1808 HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE", 1809 __func__, __LINE__); 1810 sw_index = src_ring->sw_index; 1811 write_index = src_ring->sw_index; 1812 1813 /* At this point Tx CE should be clean */ 1814 qdf_assert_always(sw_index == write_index); 1815 } 1816 } 1817 1818 /** 1819 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue. 1820 * @ce_hdl: Handle to CE 1821 * 1822 * These buffers are never allocated on the fly, but 1823 * are allocated only once during HIF start and freed 1824 * only once during HIF stop. 1825 * NOTE: 1826 * The assumption here is there is no in-flight DMA in progress 1827 * currently, so that buffers can be freed up safely. 1828 * 1829 * Return: NONE 1830 */ 1831 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl) 1832 { 1833 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 1834 struct CE_ring_state *dst_ring = ce_state->dest_ring; 1835 qdf_nbuf_t nbuf; 1836 int i; 1837 1838 if (ce_state->scn->fastpath_mode_on == false) 1839 return; 1840 1841 if (!ce_state->htt_rx_data) 1842 return; 1843 1844 /* 1845 * when fastpath_mode is on and for datapath CEs. Unlike other CE's, 1846 * this CE is completely full: does not leave one blank space, to 1847 * distinguish between empty queue & full queue. So free all the 1848 * entries. 1849 */ 1850 for (i = 0; i < dst_ring->nentries; i++) { 1851 nbuf = dst_ring->per_transfer_context[i]; 1852 1853 /* 1854 * The reasons for doing this check are: 1855 * 1) Protect against calling cleanup before allocating buffers 1856 * 2) In a corner case, FASTPATH_mode_on may be set, but we 1857 * could have a partially filled ring, because of a memory 1858 * allocation failure in the middle of allocating ring. 1859 * This check accounts for that case, checking 1860 * fastpath_mode_on flag or started flag would not have 1861 * covered that case. This is not in performance path, 1862 * so OK to do this. 1863 */ 1864 if (nbuf) { 1865 qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf, 1866 QDF_DMA_FROM_DEVICE); 1867 qdf_nbuf_free(nbuf); 1868 } 1869 } 1870 } 1871 1872 /** 1873 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1 1874 * @scn: HIF handle 1875 * 1876 * Datapath Rx CEs are special case, where we reuse all the message buffers. 1877 * Hence we have to post all the entries in the pipe, even, in the beginning 1878 * unlike for other CE pipes where one less than dest_nentries are filled in 1879 * the beginning. 1880 * 1881 * Return: None 1882 */ 1883 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 1884 { 1885 int pipe_num; 1886 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 1887 1888 if (scn->fastpath_mode_on == false) 1889 return; 1890 1891 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 1892 struct HIF_CE_pipe_info *pipe_info = 1893 &hif_state->pipe_info[pipe_num]; 1894 struct CE_state *ce_state = 1895 scn->ce_id_to_state[pipe_info->pipe_num]; 1896 1897 if (ce_state->htt_rx_data) 1898 atomic_inc(&pipe_info->recv_bufs_needed); 1899 } 1900 } 1901 #else 1902 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn) 1903 { 1904 } 1905 1906 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn) 1907 { 1908 return false; 1909 } 1910 #endif /* WLAN_FEATURE_FASTPATH */ 1911 1912 void ce_fini(struct CE_handle *copyeng) 1913 { 1914 struct CE_state *CE_state = (struct CE_state *)copyeng; 1915 unsigned int CE_id = CE_state->id; 1916 struct hif_softc *scn = CE_state->scn; 1917 uint32_t desc_size; 1918 1919 bool inited = CE_state->timer_inited; 1920 CE_state->state = CE_UNUSED; 1921 scn->ce_id_to_state[CE_id] = NULL; 1922 /* Set the flag to false first to stop processing in ce_poll_timeout */ 1923 ce_disable_polling(CE_state); 1924 1925 qdf_lro_deinit(CE_state->lro_data); 1926 1927 if (CE_state->src_ring) { 1928 /* Cleanup the datapath Tx ring */ 1929 ce_h2t_tx_ce_cleanup(copyeng); 1930 1931 desc_size = ce_get_desc_size(scn, CE_RING_SRC); 1932 if (CE_state->src_ring->shadow_base_unaligned) 1933 qdf_mem_free(CE_state->src_ring->shadow_base_unaligned); 1934 if (CE_state->src_ring->base_addr_owner_space_unaligned) 1935 ce_free_desc_ring(scn, CE_state->id, 1936 CE_state->src_ring, 1937 desc_size); 1938 qdf_mem_free(CE_state->src_ring); 1939 } 1940 if (CE_state->dest_ring) { 1941 /* Cleanup the datapath Rx ring */ 1942 ce_t2h_msg_ce_cleanup(copyeng); 1943 1944 desc_size = ce_get_desc_size(scn, CE_RING_DEST); 1945 if (CE_state->dest_ring->base_addr_owner_space_unaligned) 1946 ce_free_desc_ring(scn, CE_state->id, 1947 CE_state->dest_ring, 1948 desc_size); 1949 qdf_mem_free(CE_state->dest_ring); 1950 1951 /* epping */ 1952 if (inited) { 1953 qdf_timer_free(&CE_state->poll_timer); 1954 } 1955 } 1956 if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) { 1957 /* Cleanup the datapath Tx ring */ 1958 ce_h2t_tx_ce_cleanup(copyeng); 1959 1960 if (CE_state->status_ring->shadow_base_unaligned) 1961 qdf_mem_free( 1962 CE_state->status_ring->shadow_base_unaligned); 1963 1964 desc_size = ce_get_desc_size(scn, CE_RING_STATUS); 1965 if (CE_state->status_ring->base_addr_owner_space_unaligned) 1966 ce_free_desc_ring(scn, CE_state->id, 1967 CE_state->status_ring, 1968 desc_size); 1969 qdf_mem_free(CE_state->status_ring); 1970 } 1971 1972 free_mem_ce_debug_history(scn, CE_id); 1973 reset_ce_debug_history(scn); 1974 ce_deinit_ce_desc_event_log(scn, CE_id); 1975 1976 qdf_spinlock_destroy(&CE_state->ce_index_lock); 1977 qdf_mem_free(CE_state); 1978 } 1979 1980 void hif_detach_htc(struct hif_opaque_softc *hif_ctx) 1981 { 1982 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1983 1984 qdf_mem_zero(&hif_state->msg_callbacks_pending, 1985 sizeof(hif_state->msg_callbacks_pending)); 1986 qdf_mem_zero(&hif_state->msg_callbacks_current, 1987 sizeof(hif_state->msg_callbacks_current)); 1988 } 1989 1990 /* Send the first nbytes bytes of the buffer */ 1991 QDF_STATUS 1992 hif_send_head(struct hif_opaque_softc *hif_ctx, 1993 uint8_t pipe, unsigned int transfer_id, unsigned int nbytes, 1994 qdf_nbuf_t nbuf, unsigned int data_attr) 1995 { 1996 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 1997 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 1998 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 1999 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 2000 int bytes = nbytes, nfrags = 0; 2001 struct ce_sendlist sendlist; 2002 int status, i = 0; 2003 unsigned int mux_id = 0; 2004 2005 if (nbytes > qdf_nbuf_len(nbuf)) { 2006 HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes, 2007 (uint32_t)qdf_nbuf_len(nbuf)); 2008 QDF_ASSERT(0); 2009 } 2010 2011 transfer_id = 2012 (mux_id & MUX_ID_MASK) | 2013 (transfer_id & TRANSACTION_ID_MASK); 2014 data_attr &= DESC_DATA_FLAG_MASK; 2015 /* 2016 * The common case involves sending multiple fragments within a 2017 * single download (the tx descriptor and the tx frame header). 2018 * So, optimize for the case of multiple fragments by not even 2019 * checking whether it's necessary to use a sendlist. 2020 * The overhead of using a sendlist for a single buffer download 2021 * is not a big deal, since it happens rarely (for WMI messages). 2022 */ 2023 ce_sendlist_init(&sendlist); 2024 do { 2025 qdf_dma_addr_t frag_paddr; 2026 int frag_bytes; 2027 2028 frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags); 2029 frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags); 2030 /* 2031 * Clear the packet offset for all but the first CE desc. 2032 */ 2033 if (i++ > 0) 2034 data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M; 2035 2036 status = ce_sendlist_buf_add(&sendlist, frag_paddr, 2037 frag_bytes > 2038 bytes ? bytes : frag_bytes, 2039 qdf_nbuf_get_frag_is_wordstream 2040 (nbuf, 2041 nfrags) ? 0 : 2042 CE_SEND_FLAG_SWAP_DISABLE, 2043 data_attr); 2044 if (status != QDF_STATUS_SUCCESS) { 2045 HIF_ERROR("%s: error, frag_num %d larger than limit", 2046 __func__, nfrags); 2047 return status; 2048 } 2049 bytes -= frag_bytes; 2050 nfrags++; 2051 } while (bytes > 0); 2052 2053 /* Make sure we have resources to handle this request */ 2054 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2055 if (pipe_info->num_sends_allowed < nfrags) { 2056 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2057 ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE); 2058 return QDF_STATUS_E_RESOURCES; 2059 } 2060 pipe_info->num_sends_allowed -= nfrags; 2061 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2062 2063 if (qdf_unlikely(!ce_hdl)) { 2064 HIF_ERROR("%s: error CE handle is null", __func__); 2065 return A_ERROR; 2066 } 2067 2068 QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF); 2069 DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, 2070 QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf), 2071 sizeof(qdf_nbuf_data(nbuf)), QDF_TX)); 2072 status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); 2073 QDF_ASSERT(status == QDF_STATUS_SUCCESS); 2074 2075 return status; 2076 } 2077 2078 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe, 2079 int force) 2080 { 2081 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2082 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2083 2084 if (!force) { 2085 int resources; 2086 /* 2087 * Decide whether to actually poll for completions, or just 2088 * wait for a later chance. If there seem to be plenty of 2089 * resources left, then just wait, since checking involves 2090 * reading a CE register, which is a relatively expensive 2091 * operation. 2092 */ 2093 resources = hif_get_free_queue_number(hif_ctx, pipe); 2094 /* 2095 * If at least 50% of the total resources are still available, 2096 * don't bother checking again yet. 2097 */ 2098 if (resources > (hif_state->host_ce_config[pipe].src_nentries >> 2099 1)) 2100 return; 2101 } 2102 #if ATH_11AC_TXCOMPACT 2103 ce_per_engine_servicereap(scn, pipe); 2104 #else 2105 ce_per_engine_service(scn, pipe); 2106 #endif 2107 } 2108 2109 uint16_t 2110 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe) 2111 { 2112 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2113 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 2114 uint16_t rv; 2115 2116 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2117 rv = pipe_info->num_sends_allowed; 2118 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2119 return rv; 2120 } 2121 2122 /* Called by lower (CE) layer when a send to Target completes. */ 2123 static void 2124 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context, 2125 void *transfer_context, qdf_dma_addr_t CE_data, 2126 unsigned int nbytes, unsigned int transfer_id, 2127 unsigned int sw_index, unsigned int hw_index, 2128 unsigned int toeplitz_hash_result) 2129 { 2130 struct HIF_CE_pipe_info *pipe_info = 2131 (struct HIF_CE_pipe_info *)ce_context; 2132 unsigned int sw_idx = sw_index, hw_idx = hw_index; 2133 struct hif_msg_callbacks *msg_callbacks = 2134 &pipe_info->pipe_callbacks; 2135 2136 do { 2137 /* 2138 * The upper layer callback will be triggered 2139 * when last fragment is complteted. 2140 */ 2141 if (transfer_context != CE_SENDLIST_ITEM_CTXT) 2142 msg_callbacks->txCompletionHandler( 2143 msg_callbacks->Context, 2144 transfer_context, transfer_id, 2145 toeplitz_hash_result); 2146 2147 qdf_spin_lock_bh(&pipe_info->completion_freeq_lock); 2148 pipe_info->num_sends_allowed++; 2149 qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock); 2150 } while (ce_completed_send_next(copyeng, 2151 &ce_context, &transfer_context, 2152 &CE_data, &nbytes, &transfer_id, 2153 &sw_idx, &hw_idx, 2154 &toeplitz_hash_result) == QDF_STATUS_SUCCESS); 2155 } 2156 2157 /** 2158 * hif_ce_do_recv(): send message from copy engine to upper layers 2159 * @msg_callbacks: structure containing callback and callback context 2160 * @netbuff: skb containing message 2161 * @nbytes: number of bytes in the message 2162 * @pipe_info: used for the pipe_number info 2163 * 2164 * Checks the packet length, configures the length in the netbuff, 2165 * and calls the upper layer callback. 2166 * 2167 * return: None 2168 */ 2169 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks, 2170 qdf_nbuf_t netbuf, int nbytes, 2171 struct HIF_CE_pipe_info *pipe_info) { 2172 if (nbytes <= pipe_info->buf_sz) { 2173 qdf_nbuf_set_pktlen(netbuf, nbytes); 2174 msg_callbacks-> 2175 rxCompletionHandler(msg_callbacks->Context, 2176 netbuf, pipe_info->pipe_num); 2177 } else { 2178 HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d", 2179 __func__, netbuf, nbytes); 2180 2181 qdf_nbuf_free(netbuf); 2182 } 2183 } 2184 2185 /* Called by lower (CE) layer when data is received from the Target. */ 2186 static void 2187 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context, 2188 void *transfer_context, qdf_dma_addr_t CE_data, 2189 unsigned int nbytes, unsigned int transfer_id, 2190 unsigned int flags) 2191 { 2192 struct HIF_CE_pipe_info *pipe_info = 2193 (struct HIF_CE_pipe_info *)ce_context; 2194 struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state; 2195 struct CE_state *ce_state = (struct CE_state *) copyeng; 2196 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2197 struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn); 2198 struct hif_msg_callbacks *msg_callbacks = 2199 &pipe_info->pipe_callbacks; 2200 2201 do { 2202 hif_pm_runtime_mark_last_busy(hif_ctx); 2203 qdf_nbuf_unmap_single(scn->qdf_dev, 2204 (qdf_nbuf_t) transfer_context, 2205 QDF_DMA_FROM_DEVICE); 2206 2207 atomic_inc(&pipe_info->recv_bufs_needed); 2208 hif_post_recv_buffers_for_pipe(pipe_info); 2209 if (scn->target_status == TARGET_STATUS_RESET) 2210 qdf_nbuf_free(transfer_context); 2211 else 2212 hif_ce_do_recv(msg_callbacks, transfer_context, 2213 nbytes, pipe_info); 2214 2215 /* Set up force_break flag if num of receices reaches 2216 * MAX_NUM_OF_RECEIVES 2217 */ 2218 ce_state->receive_count++; 2219 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) { 2220 ce_state->force_break = 1; 2221 break; 2222 } 2223 } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context, 2224 &CE_data, &nbytes, &transfer_id, 2225 &flags) == QDF_STATUS_SUCCESS); 2226 2227 } 2228 2229 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */ 2230 2231 void 2232 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused, 2233 struct hif_msg_callbacks *callbacks) 2234 { 2235 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx); 2236 2237 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG 2238 spin_lock_init(&pcie_access_log_lock); 2239 #endif 2240 /* Save callbacks for later installation */ 2241 qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks, 2242 sizeof(hif_state->msg_callbacks_pending)); 2243 2244 } 2245 2246 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state) 2247 { 2248 struct CE_handle *ce_diag = hif_state->ce_diag; 2249 int pipe_num; 2250 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2251 struct hif_msg_callbacks *hif_msg_callbacks = 2252 &hif_state->msg_callbacks_current; 2253 2254 /* daemonize("hif_compl_thread"); */ 2255 2256 if (scn->ce_count == 0) { 2257 HIF_ERROR("%s: Invalid ce_count", __func__); 2258 return -EINVAL; 2259 } 2260 2261 if (!hif_msg_callbacks || 2262 !hif_msg_callbacks->rxCompletionHandler || 2263 !hif_msg_callbacks->txCompletionHandler) { 2264 HIF_ERROR("%s: no completion handler registered", __func__); 2265 return -EFAULT; 2266 } 2267 2268 A_TARGET_ACCESS_LIKELY(scn); 2269 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2270 struct CE_attr attr; 2271 struct HIF_CE_pipe_info *pipe_info; 2272 2273 pipe_info = &hif_state->pipe_info[pipe_num]; 2274 if (pipe_info->ce_hdl == ce_diag) 2275 continue; /* Handle Diagnostic CE specially */ 2276 attr = hif_state->host_ce_config[pipe_num]; 2277 if (attr.src_nentries) { 2278 /* pipe used to send to target */ 2279 HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK", 2280 __func__, pipe_num, pipe_info); 2281 ce_send_cb_register(pipe_info->ce_hdl, 2282 hif_pci_ce_send_done, pipe_info, 2283 attr.flags & CE_ATTR_DISABLE_INTR); 2284 pipe_info->num_sends_allowed = attr.src_nentries - 1; 2285 } 2286 if (attr.dest_nentries) { 2287 /* pipe used to receive from target */ 2288 ce_recv_cb_register(pipe_info->ce_hdl, 2289 hif_pci_ce_recv_data, pipe_info, 2290 attr.flags & CE_ATTR_DISABLE_INTR); 2291 } 2292 2293 if (attr.src_nentries) 2294 qdf_spinlock_create(&pipe_info->completion_freeq_lock); 2295 2296 qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks, 2297 sizeof(pipe_info->pipe_callbacks)); 2298 } 2299 2300 A_TARGET_ACCESS_UNLIKELY(scn); 2301 return 0; 2302 } 2303 2304 /* 2305 * Install pending msg callbacks. 2306 * 2307 * TBDXXX: This hack is needed because upper layers install msg callbacks 2308 * for use with HTC before BMI is done; yet this HIF implementation 2309 * needs to continue to use BMI msg callbacks. Really, upper layers 2310 * should not register HTC callbacks until AFTER BMI phase. 2311 */ 2312 static void hif_msg_callbacks_install(struct hif_softc *scn) 2313 { 2314 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2315 2316 qdf_mem_copy(&hif_state->msg_callbacks_current, 2317 &hif_state->msg_callbacks_pending, 2318 sizeof(hif_state->msg_callbacks_pending)); 2319 } 2320 2321 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe, 2322 uint8_t *DLPipe) 2323 { 2324 int ul_is_polled, dl_is_polled; 2325 2326 (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC, 2327 ULPipe, DLPipe, &ul_is_polled, &dl_is_polled); 2328 } 2329 2330 /** 2331 * hif_dump_pipe_debug_count() - Log error count 2332 * @scn: hif_softc pointer. 2333 * 2334 * Output the pipe error counts of each pipe to log file 2335 * 2336 * Return: N/A 2337 */ 2338 void hif_dump_pipe_debug_count(struct hif_softc *scn) 2339 { 2340 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2341 int pipe_num; 2342 2343 if (!hif_state) { 2344 HIF_ERROR("%s hif_state is NULL", __func__); 2345 return; 2346 } 2347 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2348 struct HIF_CE_pipe_info *pipe_info; 2349 2350 pipe_info = &hif_state->pipe_info[pipe_num]; 2351 2352 if (pipe_info->nbuf_alloc_err_count > 0 || 2353 pipe_info->nbuf_dma_err_count > 0 || 2354 pipe_info->nbuf_ce_enqueue_err_count) 2355 HIF_ERROR( 2356 "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u", 2357 __func__, pipe_info->pipe_num, 2358 atomic_read(&pipe_info->recv_bufs_needed), 2359 pipe_info->nbuf_alloc_err_count, 2360 pipe_info->nbuf_dma_err_count, 2361 pipe_info->nbuf_ce_enqueue_err_count); 2362 } 2363 } 2364 2365 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, 2366 void *nbuf, uint32_t *error_cnt, 2367 enum hif_ce_event_type failure_type, 2368 const char *failure_type_string) 2369 { 2370 int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); 2371 struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; 2372 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2373 int ce_id = CE_state->id; 2374 uint32_t error_cnt_tmp; 2375 2376 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2377 error_cnt_tmp = ++(*error_cnt); 2378 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2379 HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", 2380 __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, 2381 failure_type_string); 2382 hif_record_ce_desc_event(scn, ce_id, failure_type, 2383 NULL, nbuf, bufs_needed_tmp, 0); 2384 /* if we fail to allocate the last buffer for an rx pipe, 2385 * there is no trigger to refill the ce and we will 2386 * eventually crash 2387 */ 2388 if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) 2389 qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work); 2390 2391 } 2392 2393 2394 2395 2396 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) 2397 { 2398 struct CE_handle *ce_hdl; 2399 qdf_size_t buf_sz; 2400 struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); 2401 QDF_STATUS status; 2402 uint32_t bufs_posted = 0; 2403 unsigned int ce_id; 2404 2405 buf_sz = pipe_info->buf_sz; 2406 if (buf_sz == 0) { 2407 /* Unused Copy Engine */ 2408 return QDF_STATUS_SUCCESS; 2409 } 2410 2411 ce_hdl = pipe_info->ce_hdl; 2412 ce_id = ((struct CE_state *)ce_hdl)->id; 2413 2414 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2415 while (atomic_read(&pipe_info->recv_bufs_needed) > 0) { 2416 qdf_dma_addr_t CE_data; /* CE space buffer address */ 2417 qdf_nbuf_t nbuf; 2418 2419 atomic_dec(&pipe_info->recv_bufs_needed); 2420 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2421 2422 hif_record_ce_desc_event(scn, ce_id, 2423 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL, 2424 0, 0); 2425 nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); 2426 if (!nbuf) { 2427 hif_post_recv_buffers_failure(pipe_info, nbuf, 2428 &pipe_info->nbuf_alloc_err_count, 2429 HIF_RX_NBUF_ALLOC_FAILURE, 2430 "HIF_RX_NBUF_ALLOC_FAILURE"); 2431 return QDF_STATUS_E_NOMEM; 2432 } 2433 2434 hif_record_ce_desc_event(scn, ce_id, 2435 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf, 2436 0, 0); 2437 /* 2438 * qdf_nbuf_peek_header(nbuf, &data, &unused); 2439 * CE_data = dma_map_single(dev, data, buf_sz, ); 2440 * DMA_FROM_DEVICE); 2441 */ 2442 status = qdf_nbuf_map_single(scn->qdf_dev, nbuf, 2443 QDF_DMA_FROM_DEVICE); 2444 2445 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2446 hif_post_recv_buffers_failure(pipe_info, nbuf, 2447 &pipe_info->nbuf_dma_err_count, 2448 HIF_RX_NBUF_MAP_FAILURE, 2449 "HIF_RX_NBUF_MAP_FAILURE"); 2450 qdf_nbuf_free(nbuf); 2451 return status; 2452 } 2453 2454 CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0); 2455 hif_record_ce_desc_event(scn, ce_id, 2456 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf, 2457 0, 0); 2458 qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data, 2459 buf_sz, DMA_FROM_DEVICE); 2460 status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); 2461 if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) { 2462 hif_post_recv_buffers_failure(pipe_info, nbuf, 2463 &pipe_info->nbuf_ce_enqueue_err_count, 2464 HIF_RX_NBUF_ENQUEUE_FAILURE, 2465 "HIF_RX_NBUF_ENQUEUE_FAILURE"); 2466 2467 qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, 2468 QDF_DMA_FROM_DEVICE); 2469 qdf_nbuf_free(nbuf); 2470 return status; 2471 } 2472 2473 qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); 2474 bufs_posted++; 2475 } 2476 pipe_info->nbuf_alloc_err_count = 2477 (pipe_info->nbuf_alloc_err_count > bufs_posted) ? 2478 pipe_info->nbuf_alloc_err_count - bufs_posted : 0; 2479 pipe_info->nbuf_dma_err_count = 2480 (pipe_info->nbuf_dma_err_count > bufs_posted) ? 2481 pipe_info->nbuf_dma_err_count - bufs_posted : 0; 2482 pipe_info->nbuf_ce_enqueue_err_count = 2483 (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? 2484 pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; 2485 2486 qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); 2487 2488 return QDF_STATUS_SUCCESS; 2489 } 2490 2491 /* 2492 * Try to post all desired receive buffers for all pipes. 2493 * Returns 0 for non fastpath rx copy engine as 2494 * oom_allocation_work will be scheduled to recover any 2495 * failures, non-zero if unable to completely replenish 2496 * receive buffers for fastpath rx Copy engine. 2497 */ 2498 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn) 2499 { 2500 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2501 int pipe_num; 2502 struct CE_state *ce_state = NULL; 2503 QDF_STATUS qdf_status; 2504 2505 A_TARGET_ACCESS_LIKELY(scn); 2506 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2507 struct HIF_CE_pipe_info *pipe_info; 2508 2509 ce_state = scn->ce_id_to_state[pipe_num]; 2510 pipe_info = &hif_state->pipe_info[pipe_num]; 2511 2512 if (hif_is_nss_wifi_enabled(scn) && 2513 ce_state && (ce_state->htt_rx_data)) 2514 continue; 2515 2516 qdf_status = hif_post_recv_buffers_for_pipe(pipe_info); 2517 if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state && 2518 ce_state->htt_rx_data && 2519 scn->fastpath_mode_on) { 2520 A_TARGET_ACCESS_UNLIKELY(scn); 2521 return qdf_status; 2522 } 2523 } 2524 2525 A_TARGET_ACCESS_UNLIKELY(scn); 2526 2527 return QDF_STATUS_SUCCESS; 2528 } 2529 2530 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx) 2531 { 2532 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2533 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2534 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; 2535 2536 hif_update_fastpath_recv_bufs_cnt(scn); 2537 2538 hif_msg_callbacks_install(scn); 2539 2540 if (hif_completion_thread_startup(hif_state)) 2541 return QDF_STATUS_E_FAILURE; 2542 2543 /* enable buffer cleanup */ 2544 hif_state->started = true; 2545 2546 /* Post buffers once to start things off. */ 2547 qdf_status = hif_post_recv_buffers(scn); 2548 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { 2549 /* cleanup is done in hif_ce_disable */ 2550 HIF_ERROR("%s:failed to post buffers", __func__); 2551 return qdf_status; 2552 } 2553 2554 return qdf_status; 2555 } 2556 2557 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2558 { 2559 struct hif_softc *scn; 2560 struct CE_handle *ce_hdl; 2561 uint32_t buf_sz; 2562 struct HIF_CE_state *hif_state; 2563 qdf_nbuf_t netbuf; 2564 qdf_dma_addr_t CE_data; 2565 void *per_CE_context; 2566 2567 buf_sz = pipe_info->buf_sz; 2568 /* Unused Copy Engine */ 2569 if (buf_sz == 0) 2570 return; 2571 2572 2573 hif_state = pipe_info->HIF_CE_state; 2574 if (!hif_state->started) 2575 return; 2576 2577 scn = HIF_GET_SOFTC(hif_state); 2578 ce_hdl = pipe_info->ce_hdl; 2579 2580 if (!scn->qdf_dev) 2581 return; 2582 while (ce_revoke_recv_next 2583 (ce_hdl, &per_CE_context, (void **)&netbuf, 2584 &CE_data) == QDF_STATUS_SUCCESS) { 2585 if (netbuf) { 2586 qdf_nbuf_unmap_single(scn->qdf_dev, netbuf, 2587 QDF_DMA_FROM_DEVICE); 2588 qdf_nbuf_free(netbuf); 2589 } 2590 } 2591 } 2592 2593 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info) 2594 { 2595 struct CE_handle *ce_hdl; 2596 struct HIF_CE_state *hif_state; 2597 struct hif_softc *scn; 2598 qdf_nbuf_t netbuf; 2599 void *per_CE_context; 2600 qdf_dma_addr_t CE_data; 2601 unsigned int nbytes; 2602 unsigned int id; 2603 uint32_t buf_sz; 2604 uint32_t toeplitz_hash_result; 2605 2606 buf_sz = pipe_info->buf_sz; 2607 if (buf_sz == 0) { 2608 /* Unused Copy Engine */ 2609 return; 2610 } 2611 2612 hif_state = pipe_info->HIF_CE_state; 2613 if (!hif_state->started) { 2614 return; 2615 } 2616 2617 scn = HIF_GET_SOFTC(hif_state); 2618 2619 ce_hdl = pipe_info->ce_hdl; 2620 2621 while (ce_cancel_send_next 2622 (ce_hdl, &per_CE_context, 2623 (void **)&netbuf, &CE_data, &nbytes, 2624 &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) { 2625 if (netbuf != CE_SENDLIST_ITEM_CTXT) { 2626 /* 2627 * Packets enqueued by htt_h2t_ver_req_msg() and 2628 * htt_h2t_rx_ring_cfg_msg_ll() have already been 2629 * freed in htt_htc_misc_pkt_pool_free() in 2630 * wlantl_close(), so do not free them here again 2631 * by checking whether it's the endpoint 2632 * which they are queued in. 2633 */ 2634 if (id == scn->htc_htt_tx_endpoint) 2635 return; 2636 /* Indicate the completion to higher 2637 * layer to free the buffer 2638 */ 2639 if (pipe_info->pipe_callbacks.txCompletionHandler) 2640 pipe_info->pipe_callbacks. 2641 txCompletionHandler(pipe_info-> 2642 pipe_callbacks.Context, 2643 netbuf, id, toeplitz_hash_result); 2644 } 2645 } 2646 } 2647 2648 /* 2649 * Cleanup residual buffers for device shutdown: 2650 * buffers that were enqueued for receive 2651 * buffers that were to be sent 2652 * Note: Buffers that had completed but which were 2653 * not yet processed are on a completion queue. They 2654 * are handled when the completion thread shuts down. 2655 */ 2656 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state) 2657 { 2658 int pipe_num; 2659 struct hif_softc *scn = HIF_GET_SOFTC(hif_state); 2660 struct CE_state *ce_state; 2661 2662 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2663 struct HIF_CE_pipe_info *pipe_info; 2664 2665 ce_state = scn->ce_id_to_state[pipe_num]; 2666 if (hif_is_nss_wifi_enabled(scn) && ce_state && 2667 ((ce_state->htt_tx_data) || 2668 (ce_state->htt_rx_data))) { 2669 continue; 2670 } 2671 2672 pipe_info = &hif_state->pipe_info[pipe_num]; 2673 hif_recv_buffer_cleanup_on_pipe(pipe_info); 2674 hif_send_buffer_cleanup_on_pipe(pipe_info); 2675 } 2676 } 2677 2678 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx) 2679 { 2680 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 2681 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2682 2683 hif_buffer_cleanup(hif_state); 2684 } 2685 2686 static void hif_destroy_oom_work(struct hif_softc *scn) 2687 { 2688 struct CE_state *ce_state; 2689 int ce_id; 2690 2691 for (ce_id = 0; ce_id < scn->ce_count; ce_id++) { 2692 ce_state = scn->ce_id_to_state[ce_id]; 2693 if (ce_state) 2694 qdf_destroy_work(scn->qdf_dev, 2695 &ce_state->oom_allocation_work); 2696 } 2697 } 2698 2699 void hif_ce_stop(struct hif_softc *scn) 2700 { 2701 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2702 int pipe_num; 2703 2704 /* 2705 * before cleaning up any memory, ensure irq & 2706 * bottom half contexts will not be re-entered 2707 */ 2708 hif_disable_isr(&scn->osc); 2709 hif_destroy_oom_work(scn); 2710 scn->hif_init_done = false; 2711 2712 /* 2713 * At this point, asynchronous threads are stopped, 2714 * The Target should not DMA nor interrupt, Host code may 2715 * not initiate anything more. So we just need to clean 2716 * up Host-side state. 2717 */ 2718 2719 if (scn->athdiag_procfs_inited) { 2720 athdiag_procfs_remove(); 2721 scn->athdiag_procfs_inited = false; 2722 } 2723 2724 hif_buffer_cleanup(hif_state); 2725 2726 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 2727 struct HIF_CE_pipe_info *pipe_info; 2728 struct CE_attr attr; 2729 struct CE_handle *ce_diag = hif_state->ce_diag; 2730 2731 pipe_info = &hif_state->pipe_info[pipe_num]; 2732 if (pipe_info->ce_hdl) { 2733 if (pipe_info->ce_hdl != ce_diag) { 2734 attr = hif_state->host_ce_config[pipe_num]; 2735 if (attr.src_nentries) 2736 qdf_spinlock_destroy(&pipe_info-> 2737 completion_freeq_lock); 2738 } 2739 ce_fini(pipe_info->ce_hdl); 2740 pipe_info->ce_hdl = NULL; 2741 pipe_info->buf_sz = 0; 2742 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 2743 } 2744 } 2745 2746 if (hif_state->sleep_timer_init) { 2747 qdf_timer_stop(&hif_state->sleep_timer); 2748 qdf_timer_free(&hif_state->sleep_timer); 2749 hif_state->sleep_timer_init = false; 2750 } 2751 2752 hif_state->started = false; 2753 } 2754 2755 static void hif_get_shadow_reg_cfg(struct hif_softc *scn, 2756 struct shadow_reg_cfg 2757 **target_shadow_reg_cfg_ret, 2758 uint32_t *shadow_cfg_sz_ret) 2759 { 2760 if (target_shadow_reg_cfg_ret) 2761 *target_shadow_reg_cfg_ret = target_shadow_reg_cfg; 2762 if (shadow_cfg_sz_ret) 2763 *shadow_cfg_sz_ret = shadow_cfg_sz; 2764 } 2765 2766 /** 2767 * hif_get_target_ce_config() - get copy engine configuration 2768 * @target_ce_config_ret: basic copy engine configuration 2769 * @target_ce_config_sz_ret: size of the basic configuration in bytes 2770 * @target_service_to_ce_map_ret: service mapping for the copy engines 2771 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes 2772 * @target_shadow_reg_cfg_ret: shadow register configuration 2773 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes 2774 * 2775 * providing accessor to these values outside of this file. 2776 * currently these are stored in static pointers to const sections. 2777 * there are multiple configurations that are selected from at compile time. 2778 * Runtime selection would need to consider mode, target type and bus type. 2779 * 2780 * Return: return by parameter. 2781 */ 2782 void hif_get_target_ce_config(struct hif_softc *scn, 2783 struct CE_pipe_config **target_ce_config_ret, 2784 uint32_t *target_ce_config_sz_ret, 2785 struct service_to_pipe **target_service_to_ce_map_ret, 2786 uint32_t *target_service_to_ce_map_sz_ret, 2787 struct shadow_reg_cfg **target_shadow_reg_cfg_ret, 2788 uint32_t *shadow_cfg_sz_ret) 2789 { 2790 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2791 2792 *target_ce_config_ret = hif_state->target_ce_config; 2793 *target_ce_config_sz_ret = hif_state->target_ce_config_sz; 2794 2795 hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret, 2796 target_service_to_ce_map_sz_ret); 2797 hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret, 2798 shadow_cfg_sz_ret); 2799 } 2800 2801 #ifdef CONFIG_SHADOW_V2 2802 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2803 { 2804 int i; 2805 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2806 "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg); 2807 2808 for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) { 2809 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, 2810 "%s: i %d, val %x", __func__, i, 2811 cfg->shadow_reg_v2_cfg[i].addr); 2812 } 2813 } 2814 2815 #else 2816 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg) 2817 { 2818 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, 2819 "%s: CONFIG_SHADOW_V2 not defined", __func__); 2820 } 2821 #endif 2822 2823 #ifdef ADRASTEA_RRI_ON_DDR 2824 /** 2825 * hif_get_src_ring_read_index(): Called to get the SRRI 2826 * 2827 * @scn: hif_softc pointer 2828 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 2829 * 2830 * This function returns the SRRI to the caller. For CEs that 2831 * dont have interrupts enabled, we look at the DDR based SRRI 2832 * 2833 * Return: SRRI 2834 */ 2835 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn, 2836 uint32_t CE_ctrl_addr) 2837 { 2838 struct CE_attr attr; 2839 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2840 2841 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 2842 if (attr.flags & CE_ATTR_DISABLE_INTR) { 2843 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 2844 } else { 2845 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 2846 return A_TARGET_READ(scn, 2847 (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS); 2848 else 2849 return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, 2850 CE_ctrl_addr); 2851 } 2852 } 2853 2854 /** 2855 * hif_get_dst_ring_read_index(): Called to get the DRRI 2856 * 2857 * @scn: hif_softc pointer 2858 * @CE_ctrl_addr: base address of the CE whose RRI is to be read 2859 * 2860 * This function returns the DRRI to the caller. For CEs that 2861 * dont have interrupts enabled, we look at the DDR based DRRI 2862 * 2863 * Return: DRRI 2864 */ 2865 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn, 2866 uint32_t CE_ctrl_addr) 2867 { 2868 struct CE_attr attr; 2869 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 2870 2871 attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)]; 2872 2873 if (attr.flags & CE_ATTR_DISABLE_INTR) { 2874 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr); 2875 } else { 2876 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) 2877 return A_TARGET_READ(scn, 2878 (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS); 2879 else 2880 return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, 2881 CE_ctrl_addr); 2882 } 2883 } 2884 2885 /** 2886 * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr 2887 * @scn: hif_softc pointer 2888 * 2889 * Return: qdf status 2890 */ 2891 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn) 2892 { 2893 qdf_dma_addr_t paddr_rri_on_ddr = 0; 2894 2895 scn->vaddr_rri_on_ddr = 2896 (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev, 2897 scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)), 2898 &paddr_rri_on_ddr); 2899 2900 if (!scn->vaddr_rri_on_ddr) { 2901 hif_err("dmaable page alloc fail"); 2902 return QDF_STATUS_E_NOMEM; 2903 } 2904 2905 scn->paddr_rri_on_ddr = paddr_rri_on_ddr; 2906 2907 qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t)); 2908 2909 return QDF_STATUS_SUCCESS; 2910 } 2911 #endif 2912 2913 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR) 2914 /** 2915 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 2916 * 2917 * @scn: hif_softc pointer 2918 * 2919 * This function allocates non cached memory on ddr and sends 2920 * the physical address of this memory to the CE hardware. The 2921 * hardware updates the RRI on this particular location. 2922 * 2923 * Return: None 2924 */ 2925 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 2926 { 2927 unsigned int i; 2928 uint32_t high_paddr, low_paddr; 2929 2930 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 2931 return; 2932 2933 low_paddr = BITS0_TO_31(scn->paddr_rri_on_ddr); 2934 high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr); 2935 2936 HIF_DBG("%s using srri and drri from DDR", __func__); 2937 2938 WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr); 2939 WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr); 2940 2941 for (i = 0; i < CE_COUNT; i++) 2942 CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i)); 2943 } 2944 #else 2945 /** 2946 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism 2947 * 2948 * @scn: hif_softc pointer 2949 * 2950 * This is a dummy implementation for platforms that don't 2951 * support this functionality. 2952 * 2953 * Return: None 2954 */ 2955 static inline void hif_config_rri_on_ddr(struct hif_softc *scn) 2956 { 2957 } 2958 #endif 2959 2960 /** 2961 * hif_update_rri_over_ddr_config() - update rri_over_ddr config for 2962 * QMI command 2963 * @scn: hif context 2964 * @cfg: wlan enable config 2965 * 2966 * In case of Genoa, rri_over_ddr memory configuration is passed 2967 * to firmware through QMI configure command. 2968 */ 2969 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR) 2970 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 2971 struct pld_wlan_enable_cfg *cfg) 2972 { 2973 if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS) 2974 return; 2975 2976 cfg->rri_over_ddr_cfg_valid = true; 2977 cfg->rri_over_ddr_cfg.base_addr_low = 2978 BITS0_TO_31(scn->paddr_rri_on_ddr); 2979 cfg->rri_over_ddr_cfg.base_addr_high = 2980 BITS32_TO_35(scn->paddr_rri_on_ddr); 2981 } 2982 #else 2983 static void hif_update_rri_over_ddr_config(struct hif_softc *scn, 2984 struct pld_wlan_enable_cfg *cfg) 2985 { 2986 } 2987 #endif 2988 2989 /** 2990 * hif_wlan_enable(): call the platform driver to enable wlan 2991 * @scn: HIF Context 2992 * 2993 * This function passes the con_mode and CE configuration to 2994 * platform driver to enable wlan. 2995 * 2996 * Return: linux error code 2997 */ 2998 int hif_wlan_enable(struct hif_softc *scn) 2999 { 3000 struct pld_wlan_enable_cfg cfg; 3001 enum pld_driver_mode mode; 3002 uint32_t con_mode = hif_get_conparam(scn); 3003 3004 hif_get_target_ce_config(scn, 3005 (struct CE_pipe_config **)&cfg.ce_tgt_cfg, 3006 &cfg.num_ce_tgt_cfg, 3007 (struct service_to_pipe **)&cfg.ce_svc_cfg, 3008 &cfg.num_ce_svc_pipe_cfg, 3009 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg, 3010 &cfg.num_shadow_reg_cfg); 3011 3012 /* translate from structure size to array size */ 3013 cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config); 3014 cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe); 3015 cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg); 3016 3017 hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg, 3018 &cfg.num_shadow_reg_v2_cfg); 3019 3020 hif_print_hal_shadow_register_cfg(&cfg); 3021 3022 hif_update_rri_over_ddr_config(scn, &cfg); 3023 3024 if (QDF_GLOBAL_FTM_MODE == con_mode) 3025 mode = PLD_FTM; 3026 else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode) 3027 mode = PLD_COLDBOOT_CALIBRATION; 3028 else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode) 3029 mode = PLD_FTM_COLDBOOT_CALIBRATION; 3030 else if (QDF_IS_EPPING_ENABLED(con_mode)) 3031 mode = PLD_EPPING; 3032 else 3033 mode = PLD_MISSION; 3034 3035 if (BYPASS_QMI) 3036 return 0; 3037 else 3038 return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode); 3039 } 3040 3041 #ifdef WLAN_FEATURE_EPPING 3042 3043 #define CE_EPPING_USES_IRQ true 3044 3045 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state) 3046 { 3047 if (CE_EPPING_USES_IRQ) 3048 hif_state->host_ce_config = host_ce_config_wlan_epping_irq; 3049 else 3050 hif_state->host_ce_config = host_ce_config_wlan_epping_poll; 3051 hif_state->target_ce_config = target_ce_config_wlan_epping; 3052 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping); 3053 target_shadow_reg_cfg = target_shadow_reg_cfg_epping; 3054 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping); 3055 } 3056 #endif 3057 3058 #ifdef QCN7605_SUPPORT 3059 static inline 3060 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 3061 struct HIF_CE_state *hif_state) 3062 { 3063 hif_state->host_ce_config = host_ce_config_wlan_qcn7605; 3064 hif_state->target_ce_config = target_ce_config_wlan_qcn7605; 3065 hif_state->target_ce_config_sz = 3066 sizeof(target_ce_config_wlan_qcn7605); 3067 target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605; 3068 shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605); 3069 scn->ce_count = QCN7605_CE_COUNT; 3070 } 3071 #else 3072 static inline 3073 void hif_set_ce_config_qcn7605(struct hif_softc *scn, 3074 struct HIF_CE_state *hif_state) 3075 { 3076 HIF_ERROR("QCN7605 not supported"); 3077 } 3078 #endif 3079 3080 #ifdef CE_SVC_CMN_INIT 3081 #ifdef QCA_WIFI_SUPPORT_SRNG 3082 static inline void hif_ce_service_init(void) 3083 { 3084 ce_service_srng_init(); 3085 } 3086 #else 3087 static inline void hif_ce_service_init(void) 3088 { 3089 ce_service_legacy_init(); 3090 } 3091 #endif 3092 #else 3093 static inline void hif_ce_service_init(void) 3094 { 3095 } 3096 #endif 3097 3098 3099 /** 3100 * hif_ce_prepare_config() - load the correct static tables. 3101 * @scn: hif context 3102 * 3103 * Epping uses different static attribute tables than mission mode. 3104 */ 3105 void hif_ce_prepare_config(struct hif_softc *scn) 3106 { 3107 uint32_t mode = hif_get_conparam(scn); 3108 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3109 struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl); 3110 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3111 3112 hif_ce_service_init(); 3113 hif_state->ce_services = ce_services_attach(scn); 3114 3115 scn->ce_count = HOST_CE_COUNT; 3116 /* if epping is enabled we need to use the epping configuration. */ 3117 if (QDF_IS_EPPING_ENABLED(mode)) { 3118 hif_ce_prepare_epping_config(hif_state); 3119 return; 3120 } 3121 3122 switch (tgt_info->target_type) { 3123 default: 3124 hif_state->host_ce_config = host_ce_config_wlan; 3125 hif_state->target_ce_config = target_ce_config_wlan; 3126 hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan); 3127 break; 3128 case TARGET_TYPE_QCN7605: 3129 hif_set_ce_config_qcn7605(scn, hif_state); 3130 break; 3131 case TARGET_TYPE_AR900B: 3132 case TARGET_TYPE_QCA9984: 3133 case TARGET_TYPE_IPQ4019: 3134 case TARGET_TYPE_QCA9888: 3135 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 3136 hif_state->host_ce_config = 3137 host_lowdesc_ce_cfg_wlan_ar900b_nopktlog; 3138 } else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 3139 hif_state->host_ce_config = 3140 host_lowdesc_ce_cfg_wlan_ar900b; 3141 } else { 3142 hif_state->host_ce_config = host_ce_config_wlan_ar900b; 3143 } 3144 3145 hif_state->target_ce_config = target_ce_config_wlan_ar900b; 3146 hif_state->target_ce_config_sz = 3147 sizeof(target_ce_config_wlan_ar900b); 3148 3149 break; 3150 3151 case TARGET_TYPE_AR9888: 3152 case TARGET_TYPE_AR9888V2: 3153 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) { 3154 hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888; 3155 } else { 3156 hif_state->host_ce_config = host_ce_config_wlan_ar9888; 3157 } 3158 3159 hif_state->target_ce_config = target_ce_config_wlan_ar9888; 3160 hif_state->target_ce_config_sz = 3161 sizeof(target_ce_config_wlan_ar9888); 3162 3163 break; 3164 3165 case TARGET_TYPE_QCA8074: 3166 case TARGET_TYPE_QCA8074V2: 3167 case TARGET_TYPE_QCA6018: 3168 if (scn->bus_type == QDF_BUS_TYPE_PCI) { 3169 hif_state->host_ce_config = 3170 host_ce_config_wlan_qca8074_pci; 3171 hif_state->target_ce_config = 3172 target_ce_config_wlan_qca8074_pci; 3173 hif_state->target_ce_config_sz = 3174 sizeof(target_ce_config_wlan_qca8074_pci); 3175 } else { 3176 hif_state->host_ce_config = host_ce_config_wlan_qca8074; 3177 hif_state->target_ce_config = 3178 target_ce_config_wlan_qca8074; 3179 hif_state->target_ce_config_sz = 3180 sizeof(target_ce_config_wlan_qca8074); 3181 } 3182 break; 3183 case TARGET_TYPE_QCA6290: 3184 hif_state->host_ce_config = host_ce_config_wlan_qca6290; 3185 hif_state->target_ce_config = target_ce_config_wlan_qca6290; 3186 hif_state->target_ce_config_sz = 3187 sizeof(target_ce_config_wlan_qca6290); 3188 3189 scn->ce_count = QCA_6290_CE_COUNT; 3190 break; 3191 case TARGET_TYPE_QCN9000: 3192 hif_state->host_ce_config = host_ce_config_wlan_qcn9000; 3193 hif_state->target_ce_config = target_ce_config_wlan_qcn9000; 3194 hif_state->target_ce_config_sz = 3195 sizeof(target_ce_config_wlan_qcn9000); 3196 scn->ce_count = QCN_9000_CE_COUNT; 3197 scn->disable_wake_irq = 1; 3198 break; 3199 case TARGET_TYPE_QCA6390: 3200 hif_state->host_ce_config = host_ce_config_wlan_qca6390; 3201 hif_state->target_ce_config = target_ce_config_wlan_qca6390; 3202 hif_state->target_ce_config_sz = 3203 sizeof(target_ce_config_wlan_qca6390); 3204 3205 scn->ce_count = QCA_6390_CE_COUNT; 3206 break; 3207 case TARGET_TYPE_QCA6490: 3208 hif_state->host_ce_config = host_ce_config_wlan_qca6490; 3209 hif_state->target_ce_config = target_ce_config_wlan_qca6490; 3210 hif_state->target_ce_config_sz = 3211 sizeof(target_ce_config_wlan_qca6490); 3212 3213 scn->ce_count = QCA_6490_CE_COUNT; 3214 break; 3215 case TARGET_TYPE_ADRASTEA: 3216 if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) { 3217 hif_state->host_ce_config = 3218 host_lowdesc_ce_config_wlan_adrastea_nopktlog; 3219 hif_state->target_ce_config = 3220 target_lowdesc_ce_config_wlan_adrastea_nopktlog; 3221 hif_state->target_ce_config_sz = 3222 sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog); 3223 } else { 3224 hif_state->host_ce_config = 3225 host_ce_config_wlan_adrastea; 3226 hif_state->target_ce_config = 3227 target_ce_config_wlan_adrastea; 3228 hif_state->target_ce_config_sz = 3229 sizeof(target_ce_config_wlan_adrastea); 3230 } 3231 break; 3232 3233 } 3234 QDF_BUG(scn->ce_count <= CE_COUNT_MAX); 3235 } 3236 3237 /** 3238 * hif_ce_open() - do ce specific allocations 3239 * @hif_sc: pointer to hif context 3240 * 3241 * return: 0 for success or QDF_STATUS_E_NOMEM 3242 */ 3243 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc) 3244 { 3245 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3246 3247 qdf_spinlock_create(&hif_state->irq_reg_lock); 3248 qdf_spinlock_create(&hif_state->keep_awake_lock); 3249 return QDF_STATUS_SUCCESS; 3250 } 3251 3252 /** 3253 * hif_ce_close() - do ce specific free 3254 * @hif_sc: pointer to hif context 3255 */ 3256 void hif_ce_close(struct hif_softc *hif_sc) 3257 { 3258 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3259 3260 qdf_spinlock_destroy(&hif_state->irq_reg_lock); 3261 qdf_spinlock_destroy(&hif_state->keep_awake_lock); 3262 } 3263 3264 /** 3265 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed 3266 * @hif_sc: hif context 3267 * 3268 * uses state variables to support cleaning up when hif_config_ce fails. 3269 */ 3270 void hif_unconfig_ce(struct hif_softc *hif_sc) 3271 { 3272 int pipe_num; 3273 struct HIF_CE_pipe_info *pipe_info; 3274 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc); 3275 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc); 3276 3277 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3278 pipe_info = &hif_state->pipe_info[pipe_num]; 3279 if (pipe_info->ce_hdl) { 3280 ce_unregister_irq(hif_state, (1 << pipe_num)); 3281 } 3282 } 3283 deinit_tasklet_workers(hif_hdl); 3284 for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) { 3285 pipe_info = &hif_state->pipe_info[pipe_num]; 3286 if (pipe_info->ce_hdl) { 3287 ce_fini(pipe_info->ce_hdl); 3288 pipe_info->ce_hdl = NULL; 3289 pipe_info->buf_sz = 0; 3290 qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock); 3291 } 3292 } 3293 if (hif_sc->athdiag_procfs_inited) { 3294 athdiag_procfs_remove(); 3295 hif_sc->athdiag_procfs_inited = false; 3296 } 3297 } 3298 3299 #ifdef CONFIG_BYPASS_QMI 3300 #ifdef QCN7605_SUPPORT 3301 /** 3302 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3303 * @scn: pointer to HIF structure 3304 * 3305 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 3306 * 3307 * Return: void 3308 */ 3309 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3310 { 3311 void *target_va; 3312 phys_addr_t target_pa; 3313 struct ce_info *ce_info_ptr; 3314 uint32_t msi_data_start; 3315 uint32_t msi_data_count; 3316 uint32_t msi_irq_start; 3317 uint32_t i = 0; 3318 int ret; 3319 3320 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, 3321 scn->qdf_dev->dev, 3322 FW_SHARED_MEM + 3323 sizeof(struct ce_info), 3324 &target_pa); 3325 if (!target_va) 3326 return; 3327 3328 ce_info_ptr = (struct ce_info *)target_va; 3329 3330 if (scn->vaddr_rri_on_ddr) { 3331 ce_info_ptr->rri_over_ddr_low_paddr = 3332 BITS0_TO_31(scn->paddr_rri_on_ddr); 3333 ce_info_ptr->rri_over_ddr_high_paddr = 3334 BITS32_TO_35(scn->paddr_rri_on_ddr); 3335 } 3336 3337 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE", 3338 &msi_data_count, &msi_data_start, 3339 &msi_irq_start); 3340 if (ret) { 3341 hif_err("Failed to get CE msi config"); 3342 return; 3343 } 3344 3345 for (i = 0; i < CE_COUNT_MAX; i++) { 3346 ce_info_ptr->cfg[i].ce_id = i; 3347 ce_info_ptr->cfg[i].msi_vector = 3348 (i % msi_data_count) + msi_irq_start; 3349 } 3350 3351 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3352 hif_info("target va %pK target pa %pa", target_va, &target_pa); 3353 } 3354 #else 3355 /** 3356 * hif_post_static_buf_to_target() - post static buffer to WLAN FW 3357 * @scn: pointer to HIF structure 3358 * 3359 * WLAN FW needs 2MB memory from DDR when QMI is disabled. 3360 * 3361 * Return: void 3362 */ 3363 static void hif_post_static_buf_to_target(struct hif_softc *scn) 3364 { 3365 void *target_va; 3366 phys_addr_t target_pa; 3367 3368 target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev, 3369 FW_SHARED_MEM, &target_pa); 3370 if (!target_va) { 3371 HIF_TRACE("Memory allocation failed could not post target buf"); 3372 return; 3373 } 3374 hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa); 3375 HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa); 3376 } 3377 #endif 3378 3379 #else 3380 static inline void hif_post_static_buf_to_target(struct hif_softc *scn) 3381 { 3382 } 3383 #endif 3384 3385 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok, 3386 bool wait_for_it) 3387 { 3388 /* todo */ 3389 return 0; 3390 } 3391 3392 /** 3393 * hif_config_ce() - configure copy engines 3394 * @scn: hif context 3395 * 3396 * Prepares fw, copy engine hardware and host sw according 3397 * to the attributes selected by hif_ce_prepare_config. 3398 * 3399 * also calls athdiag_procfs_init 3400 * 3401 * return: 0 for success nonzero for failure. 3402 */ 3403 int hif_config_ce(struct hif_softc *scn) 3404 { 3405 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3406 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3407 struct HIF_CE_pipe_info *pipe_info; 3408 int pipe_num; 3409 struct CE_state *ce_state = NULL; 3410 3411 #ifdef ADRASTEA_SHADOW_REGISTERS 3412 int i; 3413 #endif 3414 QDF_STATUS rv = QDF_STATUS_SUCCESS; 3415 3416 scn->notice_send = true; 3417 scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM; 3418 3419 hif_post_static_buf_to_target(scn); 3420 3421 hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS; 3422 3423 hif_config_rri_on_ddr(scn); 3424 3425 if (ce_srng_based(scn)) 3426 scn->bus_ops.hif_target_sleep_state_adjust = 3427 &hif_srng_sleep_state_adjust; 3428 3429 /* Initialise the CE debug history sysfs interface inputs ce_id and 3430 * index. Disable data storing 3431 */ 3432 reset_ce_debug_history(scn); 3433 3434 for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) { 3435 struct CE_attr *attr; 3436 3437 pipe_info = &hif_state->pipe_info[pipe_num]; 3438 pipe_info->pipe_num = pipe_num; 3439 pipe_info->HIF_CE_state = hif_state; 3440 attr = &hif_state->host_ce_config[pipe_num]; 3441 3442 pipe_info->ce_hdl = ce_init(scn, pipe_num, attr); 3443 ce_state = scn->ce_id_to_state[pipe_num]; 3444 if (!ce_state) { 3445 A_TARGET_ACCESS_UNLIKELY(scn); 3446 goto err; 3447 } 3448 qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock); 3449 QDF_ASSERT(pipe_info->ce_hdl); 3450 if (!pipe_info->ce_hdl) { 3451 rv = QDF_STATUS_E_FAILURE; 3452 A_TARGET_ACCESS_UNLIKELY(scn); 3453 goto err; 3454 } 3455 3456 ce_state->lro_data = qdf_lro_init(); 3457 3458 if (attr->flags & CE_ATTR_DIAG) { 3459 /* Reserve the ultimate CE for 3460 * Diagnostic Window support 3461 */ 3462 hif_state->ce_diag = pipe_info->ce_hdl; 3463 continue; 3464 } 3465 3466 if (hif_is_nss_wifi_enabled(scn) && ce_state && 3467 (ce_state->htt_rx_data)) 3468 continue; 3469 3470 pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max); 3471 if (attr->dest_nentries > 0) { 3472 atomic_set(&pipe_info->recv_bufs_needed, 3473 init_buffer_count(attr->dest_nentries - 1)); 3474 /*SRNG based CE has one entry less */ 3475 if (ce_srng_based(scn)) 3476 atomic_dec(&pipe_info->recv_bufs_needed); 3477 } else { 3478 atomic_set(&pipe_info->recv_bufs_needed, 0); 3479 } 3480 ce_tasklet_init(hif_state, (1 << pipe_num)); 3481 ce_register_irq(hif_state, (1 << pipe_num)); 3482 } 3483 3484 if (athdiag_procfs_init(scn) != 0) { 3485 A_TARGET_ACCESS_UNLIKELY(scn); 3486 goto err; 3487 } 3488 scn->athdiag_procfs_inited = true; 3489 3490 HIF_DBG("%s: ce_init done", __func__); 3491 3492 init_tasklet_workers(hif_hdl); 3493 3494 HIF_DBG("%s: X, ret = %d", __func__, rv); 3495 3496 #ifdef ADRASTEA_SHADOW_REGISTERS 3497 HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__); 3498 for (i = 0; i < NUM_SHADOW_REGISTERS; i++) { 3499 HIF_DBG("%s Shadow Register%d is mapped to address %x", 3500 __func__, i, 3501 (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2)); 3502 } 3503 #endif 3504 3505 return rv != QDF_STATUS_SUCCESS; 3506 3507 err: 3508 /* Failure, so clean up */ 3509 hif_unconfig_ce(scn); 3510 HIF_TRACE("%s: X, ret = %d", __func__, rv); 3511 return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; 3512 } 3513 3514 #ifdef IPA_OFFLOAD 3515 /** 3516 * hif_ce_ipa_get_ce_resource() - get uc resource on hif 3517 * @scn: bus context 3518 * @ce_sr_base_paddr: copyengine source ring base physical address 3519 * @ce_sr_ring_size: copyengine source ring size 3520 * @ce_reg_paddr: copyengine register physical address 3521 * 3522 * IPA micro controller data path offload feature enabled, 3523 * HIF should release copy engine related resource information to IPA UC 3524 * IPA UC will access hardware resource with released information 3525 * 3526 * Return: None 3527 */ 3528 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn, 3529 qdf_shared_mem_t **ce_sr, 3530 uint32_t *ce_sr_ring_size, 3531 qdf_dma_addr_t *ce_reg_paddr) 3532 { 3533 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3534 struct HIF_CE_pipe_info *pipe_info = 3535 &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]); 3536 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3537 3538 ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size, 3539 ce_reg_paddr); 3540 } 3541 #endif /* IPA_OFFLOAD */ 3542 3543 3544 #ifdef ADRASTEA_SHADOW_REGISTERS 3545 3546 /* 3547 * Current shadow register config 3548 * 3549 * ----------------------------------------------------------- 3550 * Shadow Register | CE | src/dst write index 3551 * ----------------------------------------------------------- 3552 * 0 | 0 | src 3553 * 1 No Config - Doesn't point to anything 3554 * 2 No Config - Doesn't point to anything 3555 * 3 | 3 | src 3556 * 4 | 4 | src 3557 * 5 | 5 | src 3558 * 6 No Config - Doesn't point to anything 3559 * 7 | 7 | src 3560 * 8 No Config - Doesn't point to anything 3561 * 9 No Config - Doesn't point to anything 3562 * 10 No Config - Doesn't point to anything 3563 * 11 No Config - Doesn't point to anything 3564 * ----------------------------------------------------------- 3565 * 12 No Config - Doesn't point to anything 3566 * 13 | 1 | dst 3567 * 14 | 2 | dst 3568 * 15 No Config - Doesn't point to anything 3569 * 16 No Config - Doesn't point to anything 3570 * 17 No Config - Doesn't point to anything 3571 * 18 No Config - Doesn't point to anything 3572 * 19 | 7 | dst 3573 * 20 | 8 | dst 3574 * 21 No Config - Doesn't point to anything 3575 * 22 No Config - Doesn't point to anything 3576 * 23 No Config - Doesn't point to anything 3577 * ----------------------------------------------------------- 3578 * 3579 * 3580 * ToDo - Move shadow register config to following in the future 3581 * This helps free up a block of shadow registers towards the end. 3582 * Can be used for other purposes 3583 * 3584 * ----------------------------------------------------------- 3585 * Shadow Register | CE | src/dst write index 3586 * ----------------------------------------------------------- 3587 * 0 | 0 | src 3588 * 1 | 3 | src 3589 * 2 | 4 | src 3590 * 3 | 5 | src 3591 * 4 | 7 | src 3592 * ----------------------------------------------------------- 3593 * 5 | 1 | dst 3594 * 6 | 2 | dst 3595 * 7 | 7 | dst 3596 * 8 | 8 | dst 3597 * ----------------------------------------------------------- 3598 * 9 No Config - Doesn't point to anything 3599 * 12 No Config - Doesn't point to anything 3600 * 13 No Config - Doesn't point to anything 3601 * 14 No Config - Doesn't point to anything 3602 * 15 No Config - Doesn't point to anything 3603 * 16 No Config - Doesn't point to anything 3604 * 17 No Config - Doesn't point to anything 3605 * 18 No Config - Doesn't point to anything 3606 * 19 No Config - Doesn't point to anything 3607 * 20 No Config - Doesn't point to anything 3608 * 21 No Config - Doesn't point to anything 3609 * 22 No Config - Doesn't point to anything 3610 * 23 No Config - Doesn't point to anything 3611 * ----------------------------------------------------------- 3612 */ 3613 #ifndef QCN7605_SUPPORT 3614 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3615 { 3616 u32 addr = 0; 3617 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3618 3619 switch (ce) { 3620 case 0: 3621 addr = SHADOW_VALUE0; 3622 break; 3623 case 3: 3624 addr = SHADOW_VALUE3; 3625 break; 3626 case 4: 3627 addr = SHADOW_VALUE4; 3628 break; 3629 case 5: 3630 addr = SHADOW_VALUE5; 3631 break; 3632 case 7: 3633 addr = SHADOW_VALUE7; 3634 break; 3635 default: 3636 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3637 QDF_ASSERT(0); 3638 } 3639 return addr; 3640 3641 } 3642 3643 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3644 { 3645 u32 addr = 0; 3646 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3647 3648 switch (ce) { 3649 case 1: 3650 addr = SHADOW_VALUE13; 3651 break; 3652 case 2: 3653 addr = SHADOW_VALUE14; 3654 break; 3655 case 5: 3656 addr = SHADOW_VALUE17; 3657 break; 3658 case 7: 3659 addr = SHADOW_VALUE19; 3660 break; 3661 case 8: 3662 addr = SHADOW_VALUE20; 3663 break; 3664 case 9: 3665 addr = SHADOW_VALUE21; 3666 break; 3667 case 10: 3668 addr = SHADOW_VALUE22; 3669 break; 3670 case 11: 3671 addr = SHADOW_VALUE23; 3672 break; 3673 default: 3674 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3675 QDF_ASSERT(0); 3676 } 3677 3678 return addr; 3679 3680 } 3681 #else 3682 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3683 { 3684 u32 addr = 0; 3685 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3686 3687 switch (ce) { 3688 case 0: 3689 addr = SHADOW_VALUE0; 3690 break; 3691 case 4: 3692 addr = SHADOW_VALUE4; 3693 break; 3694 case 5: 3695 addr = SHADOW_VALUE5; 3696 break; 3697 default: 3698 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3699 QDF_ASSERT(0); 3700 } 3701 return addr; 3702 } 3703 3704 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr) 3705 { 3706 u32 addr = 0; 3707 u32 ce = COPY_ENGINE_ID(ctrl_addr); 3708 3709 switch (ce) { 3710 case 1: 3711 addr = SHADOW_VALUE13; 3712 break; 3713 case 2: 3714 addr = SHADOW_VALUE14; 3715 break; 3716 case 3: 3717 addr = SHADOW_VALUE15; 3718 break; 3719 case 5: 3720 addr = SHADOW_VALUE17; 3721 break; 3722 case 7: 3723 addr = SHADOW_VALUE19; 3724 break; 3725 case 8: 3726 addr = SHADOW_VALUE20; 3727 break; 3728 case 9: 3729 addr = SHADOW_VALUE21; 3730 break; 3731 case 10: 3732 addr = SHADOW_VALUE22; 3733 break; 3734 case 11: 3735 addr = SHADOW_VALUE23; 3736 break; 3737 default: 3738 HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce); 3739 QDF_ASSERT(0); 3740 } 3741 3742 return addr; 3743 } 3744 #endif 3745 #endif 3746 3747 #if defined(FEATURE_LRO) 3748 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id) 3749 { 3750 struct CE_state *ce_state; 3751 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3752 3753 ce_state = scn->ce_id_to_state[ctx_id]; 3754 3755 return ce_state->lro_data; 3756 } 3757 #endif 3758 3759 /** 3760 * hif_map_service_to_pipe() - returns the ce ids pertaining to 3761 * this service 3762 * @scn: hif_softc pointer. 3763 * @svc_id: Service ID for which the mapping is needed. 3764 * @ul_pipe: address of the container in which ul pipe is returned. 3765 * @dl_pipe: address of the container in which dl pipe is returned. 3766 * @ul_is_polled: address of the container in which a bool 3767 * indicating if the UL CE for this service 3768 * is polled is returned. 3769 * @dl_is_polled: address of the container in which a bool 3770 * indicating if the DL CE for this service 3771 * is polled is returned. 3772 * 3773 * Return: Indicates whether the service has been found in the table. 3774 * Upon return, ul_is_polled is updated only if ul_pipe is updated. 3775 * There will be warning logs if either leg has not been updated 3776 * because it missed the entry in the table (but this is not an err). 3777 */ 3778 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id, 3779 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled, 3780 int *dl_is_polled) 3781 { 3782 int status = QDF_STATUS_E_INVAL; 3783 unsigned int i; 3784 struct service_to_pipe element; 3785 struct service_to_pipe *tgt_svc_map_to_use; 3786 uint32_t sz_tgt_svc_map_to_use; 3787 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 3788 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 3789 bool dl_updated = false; 3790 bool ul_updated = false; 3791 3792 hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use, 3793 &sz_tgt_svc_map_to_use); 3794 3795 *dl_is_polled = 0; /* polling for received messages not supported */ 3796 3797 for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) { 3798 3799 memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element)); 3800 if (element.service_id == svc_id) { 3801 if (element.pipedir == PIPEDIR_OUT) { 3802 *ul_pipe = element.pipenum; 3803 *ul_is_polled = 3804 (hif_state->host_ce_config[*ul_pipe].flags & 3805 CE_ATTR_DISABLE_INTR) != 0; 3806 ul_updated = true; 3807 } else if (element.pipedir == PIPEDIR_IN) { 3808 *dl_pipe = element.pipenum; 3809 dl_updated = true; 3810 } 3811 status = QDF_STATUS_SUCCESS; 3812 } 3813 } 3814 if (ul_updated == false) 3815 HIF_DBG("ul pipe is NOT updated for service %d", svc_id); 3816 if (dl_updated == false) 3817 HIF_DBG("dl pipe is NOT updated for service %d", svc_id); 3818 3819 return status; 3820 } 3821 3822 #ifdef SHADOW_REG_DEBUG 3823 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn, 3824 uint32_t CE_ctrl_addr) 3825 { 3826 uint32_t read_from_hw, srri_from_ddr = 0; 3827 3828 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS); 3829 3830 srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3831 3832 if (read_from_hw != srri_from_ddr) { 3833 HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3834 __func__, srri_from_ddr, read_from_hw, 3835 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3836 QDF_ASSERT(0); 3837 } 3838 return srri_from_ddr; 3839 } 3840 3841 3842 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn, 3843 uint32_t CE_ctrl_addr) 3844 { 3845 uint32_t read_from_hw, drri_from_ddr = 0; 3846 3847 read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS); 3848 3849 drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr)); 3850 3851 if (read_from_hw != drri_from_ddr) { 3852 HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x", 3853 drri_from_ddr, read_from_hw, 3854 CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr)); 3855 QDF_ASSERT(0); 3856 } 3857 return drri_from_ddr; 3858 } 3859 3860 #endif 3861 3862 /** 3863 * hif_dump_ce_registers() - dump ce registers 3864 * @scn: hif_opaque_softc pointer. 3865 * 3866 * Output the copy engine registers 3867 * 3868 * Return: 0 for success or error code 3869 */ 3870 int hif_dump_ce_registers(struct hif_softc *scn) 3871 { 3872 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 3873 uint32_t ce_reg_address = CE0_BASE_ADDRESS; 3874 uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2]; 3875 uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2; 3876 uint16_t i; 3877 QDF_STATUS status; 3878 3879 for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) { 3880 if (!scn->ce_id_to_state[i]) { 3881 HIF_DBG("CE%d not used.", i); 3882 continue; 3883 } 3884 3885 status = hif_diag_read_mem(hif_hdl, ce_reg_address, 3886 (uint8_t *) &ce_reg_values[0], 3887 ce_reg_word_size * sizeof(uint32_t)); 3888 3889 if (status != QDF_STATUS_SUCCESS) { 3890 HIF_ERROR("Dumping CE register failed!"); 3891 return -EACCES; 3892 } 3893 HIF_ERROR("CE%d=>\n", i); 3894 qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, 3895 (uint8_t *) &ce_reg_values[0], 3896 ce_reg_word_size * sizeof(uint32_t)); 3897 qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address 3898 + SR_WR_INDEX_ADDRESS), 3899 ce_reg_values[SR_WR_INDEX_ADDRESS/4]); 3900 qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address 3901 + CURRENT_SRRI_ADDRESS), 3902 ce_reg_values[CURRENT_SRRI_ADDRESS/4]); 3903 qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address 3904 + DST_WR_INDEX_ADDRESS), 3905 ce_reg_values[DST_WR_INDEX_ADDRESS/4]); 3906 qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address 3907 + CURRENT_DRRI_ADDRESS), 3908 ce_reg_values[CURRENT_DRRI_ADDRESS/4]); 3909 qdf_print("---"); 3910 } 3911 return 0; 3912 } 3913 qdf_export_symbol(hif_dump_ce_registers); 3914 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT 3915 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc, 3916 struct hif_pipe_addl_info *hif_info, uint32_t pipe) 3917 { 3918 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3919 struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn); 3920 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc); 3921 struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]); 3922 struct CE_handle *ce_hdl = pipe_info->ce_hdl; 3923 struct CE_state *ce_state = (struct CE_state *)ce_hdl; 3924 struct CE_ring_state *src_ring = ce_state->src_ring; 3925 struct CE_ring_state *dest_ring = ce_state->dest_ring; 3926 3927 if (src_ring) { 3928 hif_info->ul_pipe.nentries = src_ring->nentries; 3929 hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask; 3930 hif_info->ul_pipe.sw_index = src_ring->sw_index; 3931 hif_info->ul_pipe.write_index = src_ring->write_index; 3932 hif_info->ul_pipe.hw_index = src_ring->hw_index; 3933 hif_info->ul_pipe.base_addr_CE_space = 3934 src_ring->base_addr_CE_space; 3935 hif_info->ul_pipe.base_addr_owner_space = 3936 src_ring->base_addr_owner_space; 3937 } 3938 3939 3940 if (dest_ring) { 3941 hif_info->dl_pipe.nentries = dest_ring->nentries; 3942 hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask; 3943 hif_info->dl_pipe.sw_index = dest_ring->sw_index; 3944 hif_info->dl_pipe.write_index = dest_ring->write_index; 3945 hif_info->dl_pipe.hw_index = dest_ring->hw_index; 3946 hif_info->dl_pipe.base_addr_CE_space = 3947 dest_ring->base_addr_CE_space; 3948 hif_info->dl_pipe.base_addr_owner_space = 3949 dest_ring->base_addr_owner_space; 3950 } 3951 3952 hif_info->pci_mem = pci_resource_start(sc->pdev, 0); 3953 hif_info->ctrl_addr = ce_state->ctrl_addr; 3954 3955 return hif_info; 3956 } 3957 qdf_export_symbol(hif_get_addl_pipe_info); 3958 3959 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode) 3960 { 3961 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3962 3963 scn->nss_wifi_ol_mode = mode; 3964 return 0; 3965 } 3966 qdf_export_symbol(hif_set_nss_wifiol_mode); 3967 #endif 3968 3969 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib) 3970 { 3971 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3972 scn->hif_attribute = hif_attrib; 3973 } 3974 3975 3976 /* disable interrupts (only applicable for legacy copy engine currently */ 3977 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num) 3978 { 3979 struct hif_softc *scn = HIF_GET_SOFTC(osc); 3980 struct CE_state *CE_state = scn->ce_id_to_state[pipe_num]; 3981 uint32_t ctrl_addr = CE_state->ctrl_addr; 3982 3983 Q_TARGET_ACCESS_BEGIN(scn); 3984 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr); 3985 Q_TARGET_ACCESS_END(scn); 3986 } 3987 qdf_export_symbol(hif_disable_interrupt); 3988 3989 /** 3990 * hif_fw_event_handler() - hif fw event handler 3991 * @hif_state: pointer to hif ce state structure 3992 * 3993 * Process fw events and raise HTC callback to process fw events. 3994 * 3995 * Return: none 3996 */ 3997 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state) 3998 { 3999 struct hif_msg_callbacks *msg_callbacks = 4000 &hif_state->msg_callbacks_current; 4001 4002 if (!msg_callbacks->fwEventHandler) 4003 return; 4004 4005 msg_callbacks->fwEventHandler(msg_callbacks->Context, 4006 QDF_STATUS_E_FAILURE); 4007 } 4008 4009 #ifndef QCA_WIFI_3_0 4010 /** 4011 * hif_fw_interrupt_handler() - FW interrupt handler 4012 * @irq: irq number 4013 * @arg: the user pointer 4014 * 4015 * Called from the PCI interrupt handler when a 4016 * firmware-generated interrupt to the Host. 4017 * 4018 * only registered for legacy ce devices 4019 * 4020 * Return: status of handled irq 4021 */ 4022 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 4023 { 4024 struct hif_softc *scn = arg; 4025 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn); 4026 uint32_t fw_indicator_address, fw_indicator; 4027 4028 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) 4029 return ATH_ISR_NOSCHED; 4030 4031 fw_indicator_address = hif_state->fw_indicator_address; 4032 /* For sudden unplug this will return ~0 */ 4033 fw_indicator = A_TARGET_READ(scn, fw_indicator_address); 4034 4035 if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) { 4036 /* ACK: clear Target-side pending event */ 4037 A_TARGET_WRITE(scn, fw_indicator_address, 4038 fw_indicator & ~FW_IND_EVENT_PENDING); 4039 if (Q_TARGET_ACCESS_END(scn) < 0) 4040 return ATH_ISR_SCHED; 4041 4042 if (hif_state->started) { 4043 hif_fw_event_handler(hif_state); 4044 } else { 4045 /* 4046 * Probable Target failure before we're prepared 4047 * to handle it. Generally unexpected. 4048 * fw_indicator used as bitmap, and defined as below: 4049 * FW_IND_EVENT_PENDING 0x1 4050 * FW_IND_INITIALIZED 0x2 4051 * FW_IND_NEEDRECOVER 0x4 4052 */ 4053 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, 4054 ("%s: Early firmware event indicated 0x%x\n", 4055 __func__, fw_indicator)); 4056 } 4057 } else { 4058 if (Q_TARGET_ACCESS_END(scn) < 0) 4059 return ATH_ISR_SCHED; 4060 } 4061 4062 return ATH_ISR_SCHED; 4063 } 4064 #else 4065 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg) 4066 { 4067 return ATH_ISR_SCHED; 4068 } 4069 #endif /* #ifdef QCA_WIFI_3_0 */ 4070 4071 4072 /** 4073 * hif_wlan_disable(): call the platform driver to disable wlan 4074 * @scn: HIF Context 4075 * 4076 * This function passes the con_mode to platform driver to disable 4077 * wlan. 4078 * 4079 * Return: void 4080 */ 4081 void hif_wlan_disable(struct hif_softc *scn) 4082 { 4083 enum pld_driver_mode mode; 4084 uint32_t con_mode = hif_get_conparam(scn); 4085 4086 if (scn->target_status == TARGET_STATUS_RESET) 4087 return; 4088 4089 if (QDF_GLOBAL_FTM_MODE == con_mode) 4090 mode = PLD_FTM; 4091 else if (QDF_IS_EPPING_ENABLED(con_mode)) 4092 mode = PLD_EPPING; 4093 else 4094 mode = PLD_MISSION; 4095 4096 pld_wlan_disable(scn->qdf_dev->dev, mode); 4097 } 4098 4099 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id) 4100 { 4101 QDF_STATUS status; 4102 uint8_t ul_pipe, dl_pipe; 4103 int ul_is_polled, dl_is_polled; 4104 4105 /* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */ 4106 status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn), 4107 HTC_CTRL_RSVD_SVC, 4108 &ul_pipe, &dl_pipe, 4109 &ul_is_polled, &dl_is_polled); 4110 if (status) { 4111 HIF_ERROR("%s: failed to map pipe: %d", __func__, status); 4112 return qdf_status_to_os_return(status); 4113 } 4114 4115 *ce_id = dl_pipe; 4116 4117 return 0; 4118 } 4119