/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
26 */ 27 28 #include <linux/pci.h> 29 #include <linux/slab.h> 30 #include <linux/interrupt.h> 31 #include <linux/if_arp.h> 32 #include "qdf_lock.h" 33 #include "qdf_types.h" 34 #include "qdf_status.h" 35 #include "regtable.h" 36 #include "hif.h" 37 #include "hif_io32.h" 38 #include "ce_main.h" 39 #include "ce_api.h" 40 #include "ce_reg.h" 41 #include "ce_internal.h" 42 #include "ce_tasklet.h" 43 #include "pld_common.h" 44 #include "hif_debug.h" 45 #include "hif_napi.h" 46 47 48 /** 49 * struct tasklet_work 50 * 51 * @id: ce_id 52 * @work: work 53 */ 54 struct tasklet_work { 55 enum ce_id_type id; 56 void *data; 57 struct work_struct work; 58 }; 59 60 61 /** 62 * reschedule_ce_tasklet_work_handler() - reschedule work 63 * @work: struct work_struct 64 * 65 * Return: N/A 66 */ 67 static void reschedule_ce_tasklet_work_handler(struct work_struct *work) 68 { 69 struct tasklet_work *ce_work = container_of(work, struct tasklet_work, 70 work); 71 struct hif_softc *scn = ce_work->data; 72 struct HIF_CE_state *hif_ce_state; 73 74 if (NULL == scn) { 75 HIF_ERROR("%s: tasklet scn is null", __func__); 76 return; 77 } 78 79 hif_ce_state = HIF_GET_CE_STATE(scn); 80 81 if (scn->hif_init_done == false) { 82 HIF_ERROR("%s: wlan driver is unloaded", __func__); 83 return; 84 } 85 tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq); 86 } 87 88 static struct tasklet_work tasklet_workers[CE_ID_MAX]; 89 static bool work_initialized; 90 91 /** 92 * init_tasklet_work() - init_tasklet_work 93 * @work: struct work_struct 94 * @work_handler: work_handler 95 * 96 * Return: N/A 97 */ 98 static void init_tasklet_work(struct work_struct *work, 99 work_func_t work_handler) 100 { 101 INIT_WORK(work, work_handler); 102 } 103 104 /** 105 * init_tasklet_workers() - init_tasklet_workers 106 * @scn: HIF Context 107 * 108 * Return: N/A 109 */ 110 void init_tasklet_workers(struct hif_opaque_softc *scn) 111 { 112 uint32_t id; 113 114 for (id = 0; id < CE_ID_MAX; id++) { 115 tasklet_workers[id].id 
= id; 116 tasklet_workers[id].data = scn; 117 init_tasklet_work(&tasklet_workers[id].work, 118 reschedule_ce_tasklet_work_handler); 119 } 120 work_initialized = true; 121 } 122 123 #ifdef HIF_CONFIG_SLUB_DEBUG_ON 124 /** 125 * ce_schedule_tasklet() - schedule ce tasklet 126 * @tasklet_entry: struct ce_tasklet_entry 127 * 128 * Return: N/A 129 */ 130 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) 131 { 132 if (work_initialized && (tasklet_entry->ce_id < CE_ID_MAX)) 133 schedule_work(&tasklet_workers[tasklet_entry->ce_id].work); 134 else 135 HIF_ERROR("%s: work_initialized = %d, ce_id = %d", 136 __func__, work_initialized, tasklet_entry->ce_id); 137 } 138 #else 139 /** 140 * ce_schedule_tasklet() - schedule ce tasklet 141 * @tasklet_entry: struct ce_tasklet_entry 142 * 143 * Return: N/A 144 */ 145 static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry) 146 { 147 tasklet_schedule(&tasklet_entry->intr_tq); 148 } 149 #endif 150 151 /** 152 * ce_tasklet() - ce_tasklet 153 * @data: data 154 * 155 * Return: N/A 156 */ 157 static void ce_tasklet(unsigned long data) 158 { 159 struct ce_tasklet_entry *tasklet_entry = 160 (struct ce_tasklet_entry *)data; 161 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; 162 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 163 struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id]; 164 165 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 166 HIF_CE_TASKLET_ENTRY, NULL, NULL, 0, 0); 167 168 if (qdf_atomic_read(&scn->link_suspended)) { 169 HIF_ERROR("%s: ce %d tasklet fired after link suspend.", 170 __func__, tasklet_entry->ce_id); 171 QDF_BUG(0); 172 } 173 174 ce_per_engine_service(scn, tasklet_entry->ce_id); 175 176 if (ce_check_rx_pending(CE_state)) { 177 /* 178 * There are frames pending, schedule tasklet to process them. 179 * Enable the interrupt only when there is no pending frames in 180 * any of the Copy Engine pipes. 
181 */ 182 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, 183 HIF_CE_TASKLET_RESCHEDULE, NULL, NULL, 0, 0); 184 185 ce_schedule_tasklet(tasklet_entry); 186 return; 187 } 188 189 if (scn->target_status != TARGET_STATUS_RESET) 190 hif_irq_enable(scn, tasklet_entry->ce_id); 191 192 hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT, 193 NULL, NULL, 0, 0); 194 195 qdf_atomic_dec(&scn->active_tasklet_cnt); 196 } 197 198 /** 199 * ce_tasklet_init() - ce_tasklet_init 200 * @hif_ce_state: hif_ce_state 201 * @mask: mask 202 * 203 * Return: N/A 204 */ 205 void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask) 206 { 207 int i; 208 209 for (i = 0; i < CE_COUNT_MAX; i++) { 210 if (mask & (1 << i)) { 211 hif_ce_state->tasklets[i].ce_id = i; 212 hif_ce_state->tasklets[i].inited = true; 213 hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state; 214 tasklet_init(&hif_ce_state->tasklets[i].intr_tq, 215 ce_tasklet, 216 (unsigned long)&hif_ce_state->tasklets[i]); 217 } 218 } 219 } 220 /** 221 * ce_tasklet_kill() - ce_tasklet_kill 222 * @hif_ce_state: hif_ce_state 223 * 224 * Return: N/A 225 */ 226 void ce_tasklet_kill(struct hif_softc *scn) 227 { 228 int i; 229 struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn); 230 231 for (i = 0; i < CE_COUNT_MAX; i++) 232 if (hif_ce_state->tasklets[i].inited) { 233 tasklet_kill(&hif_ce_state->tasklets[i].intr_tq); 234 hif_ce_state->tasklets[i].inited = false; 235 } 236 qdf_atomic_set(&scn->active_tasklet_cnt, 0); 237 } 238 239 #define HIF_CE_DRAIN_WAIT_CNT 20 240 /** 241 * hif_drain_tasklets(): wait untill no tasklet is pending 242 * @scn: hif context 243 * 244 * Let running tasklets clear pending trafic. 245 * 246 * Return: 0 if no bottom half is in progress when it returns. 247 * -EFAULT if it times out. 
248 */ 249 int hif_drain_tasklets(struct hif_softc *scn) 250 { 251 uint32_t ce_drain_wait_cnt = 0; 252 int32_t tasklet_cnt; 253 254 while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) { 255 if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) { 256 HIF_ERROR("%s: CE still not done with access: %d", 257 __func__, tasklet_cnt); 258 259 return -EFAULT; 260 } 261 HIF_INFO("%s: Waiting for CE to finish access", __func__); 262 msleep(10); 263 } 264 return 0; 265 } 266 267 #ifdef WLAN_SUSPEND_RESUME_TEST 268 /** 269 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should 270 * trigger a unit-test resume. 271 * @scn: The HIF context to operate on 272 * @ce_id: The copy engine Id from the originating interrupt 273 * 274 * Return: true if the raised irq should trigger a unit-test resume 275 */ 276 static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 277 { 278 int errno; 279 uint8_t wake_ce_id; 280 281 if (!hif_is_ut_suspended(scn)) 282 return false; 283 284 /* ensure passed ce_id matches wake ce_id */ 285 errno = hif_get_wake_ce_id(scn, &wake_ce_id); 286 if (errno) { 287 HIF_ERROR("%s: failed to get wake CE Id: %d", __func__, errno); 288 return false; 289 } 290 291 return ce_id == wake_ce_id; 292 } 293 #else 294 static inline bool 295 hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id) 296 { 297 return false; 298 } 299 #endif /* WLAN_SUSPEND_RESUME_TEST */ 300 301 /** 302 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler 303 * @irq: irq coming from kernel 304 * @context: context 305 * 306 * Return: N/A 307 */ 308 static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context) 309 { 310 struct ce_tasklet_entry *tasklet_entry = context; 311 struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state); 312 313 return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq), 314 tasklet_entry); 315 } 316 317 /** 318 * hif_ce_increment_interrupt_count() - update ce stats 319 * @hif_ce_state: 
ce state 320 * @ce_id: ce id 321 * 322 * Return: none 323 */ 324 static inline void 325 hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id) 326 { 327 int cpu_id = qdf_get_cpu(); 328 329 hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++; 330 } 331 332 /** 333 * hif_display_ce_stats() - display ce stats 334 * @hif_ce_state: ce state 335 * 336 * Return: none 337 */ 338 void hif_display_ce_stats(struct HIF_CE_state *hif_ce_state) 339 { 340 #define STR_SIZE 128 341 uint8_t i, j, pos; 342 char str_buffer[STR_SIZE]; 343 int size, ret; 344 345 qdf_debug("CE interrupt statistics:"); 346 for (i = 0; i < CE_COUNT_MAX; i++) { 347 size = STR_SIZE; 348 pos = 0; 349 for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) { 350 ret = snprintf(str_buffer + pos, size, "[%d]:%d ", 351 j, hif_ce_state->stats.ce_per_cpu[i][j]); 352 if (ret <= 0 || ret >= size) 353 break; 354 size -= ret; 355 pos += ret; 356 } 357 qdf_debug("CE id[%2d] - %s", i, str_buffer); 358 } 359 #undef STR_SIZE 360 } 361 362 /** 363 * hif_clear_ce_stats() - clear ce stats 364 * @hif_ce_state: ce state 365 * 366 * Return: none 367 */ 368 void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state) 369 { 370 qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats)); 371 } 372 373 /** 374 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context 375 * @ce_id: ce_id 376 * @tasklet_entry: context 377 * 378 * Return: N/A 379 */ 380 irqreturn_t ce_dispatch_interrupt(int ce_id, 381 struct ce_tasklet_entry *tasklet_entry) 382 { 383 struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state; 384 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 385 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn); 386 387 if (tasklet_entry->ce_id != ce_id) { 388 HIF_ERROR("%s: ce_id (expect %d, received %d) does not match", 389 __func__, tasklet_entry->ce_id, ce_id); 390 return IRQ_NONE; 391 } 392 if (unlikely(ce_id >= CE_COUNT_MAX)) { 393 HIF_ERROR("%s: ce_id=%d > CE_COUNT_MAX=%d", 394 
__func__, tasklet_entry->ce_id, CE_COUNT_MAX); 395 return IRQ_NONE; 396 } 397 398 hif_irq_disable(scn, ce_id); 399 400 if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) 401 return IRQ_HANDLED; 402 403 hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, 404 NULL, NULL, 0, 0); 405 hif_ce_increment_interrupt_count(hif_ce_state, ce_id); 406 407 if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) { 408 hif_ut_fw_resume(scn); 409 hif_irq_enable(scn, ce_id); 410 return IRQ_HANDLED; 411 } 412 413 qdf_atomic_inc(&scn->active_tasklet_cnt); 414 415 if (hif_napi_enabled(hif_hdl, ce_id)) 416 hif_napi_schedule(hif_hdl, ce_id); 417 else 418 tasklet_schedule(&tasklet_entry->intr_tq); 419 420 return IRQ_HANDLED; 421 } 422 423 /** 424 * const char *ce_name 425 * 426 * @ce_name: ce_name 427 */ 428 const char *ce_name[] = { 429 "WLAN_CE_0", 430 "WLAN_CE_1", 431 "WLAN_CE_2", 432 "WLAN_CE_3", 433 "WLAN_CE_4", 434 "WLAN_CE_5", 435 "WLAN_CE_6", 436 "WLAN_CE_7", 437 "WLAN_CE_8", 438 "WLAN_CE_9", 439 "WLAN_CE_10", 440 "WLAN_CE_11", 441 }; 442 /** 443 * ce_unregister_irq() - ce_unregister_irq 444 * @hif_ce_state: hif_ce_state copy engine device handle 445 * @mask: which coppy engines to unregister for. 446 * 447 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x, 448 * unregister for copy engine x. 
449 * 450 * Return: QDF_STATUS 451 */ 452 QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) 453 { 454 int id; 455 int ce_count; 456 int ret; 457 struct hif_softc *scn; 458 459 if (hif_ce_state == NULL) { 460 HIF_WARN("%s: hif_ce_state = NULL", __func__); 461 return QDF_STATUS_SUCCESS; 462 } 463 464 scn = HIF_GET_SOFTC(hif_ce_state); 465 ce_count = scn->ce_count; 466 /* we are removing interrupts, so better stop NAPI */ 467 ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn), 468 NAPI_EVT_INT_STATE, (void *)0); 469 if (ret != 0) 470 HIF_ERROR("%s: napi_event INT_STATE returned %d", 471 __func__, ret); 472 /* this is not fatal, continue */ 473 474 /* filter mask to free only for ce's with irq registered */ 475 mask &= hif_ce_state->ce_register_irq_done; 476 for (id = 0; id < ce_count; id++) { 477 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { 478 ret = pld_ce_free_irq(scn->qdf_dev->dev, id, 479 &hif_ce_state->tasklets[id]); 480 if (ret < 0) 481 HIF_ERROR( 482 "%s: pld_unregister_irq error - ce_id = %d, ret = %d", 483 __func__, id, ret); 484 } 485 } 486 hif_ce_state->ce_register_irq_done &= ~mask; 487 488 return QDF_STATUS_SUCCESS; 489 } 490 /** 491 * ce_register_irq() - ce_register_irq 492 * @hif_ce_state: hif_ce_state 493 * @mask: which coppy engines to unregister for. 494 * 495 * Registers copy engine irqs matching mask. If a 1 is set at bit x, 496 * Register for copy engine x. 
497 * 498 * Return: QDF_STATUS 499 */ 500 QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask) 501 { 502 int id; 503 int ce_count; 504 int ret; 505 unsigned long irqflags = IRQF_TRIGGER_RISING; 506 uint32_t done_mask = 0; 507 struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state); 508 509 ce_count = scn->ce_count; 510 511 for (id = 0; id < ce_count; id++) { 512 if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) { 513 ret = pld_ce_request_irq(scn->qdf_dev->dev, id, 514 hif_snoc_interrupt_handler, 515 irqflags, ce_name[id], 516 &hif_ce_state->tasklets[id]); 517 if (ret) { 518 HIF_ERROR( 519 "%s: cannot register CE %d irq handler, ret = %d", 520 __func__, id, ret); 521 ce_unregister_irq(hif_ce_state, done_mask); 522 return QDF_STATUS_E_FAULT; 523 } 524 done_mask |= 1 << id; 525 } 526 } 527 hif_ce_state->ce_register_irq_done |= done_mask; 528 529 return QDF_STATUS_SUCCESS; 530 } 531