1 /* 2 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. 3 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for 6 * any purpose with or without fee is hereby granted, provided that the 7 * above copyright notice and this permission notice appear in all 8 * copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 17 * PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 /** 21 * DOC: hif_napi.c 22 * 23 * HIF NAPI interface implementation 24 */ 25 26 #include <linux/string.h> /* memset */ 27 28 /* Linux headers */ 29 #include <linux/cpumask.h> 30 #include <linux/cpufreq.h> 31 #include <linux/cpu.h> 32 #include <linux/topology.h> 33 #include <linux/interrupt.h> 34 #ifdef CONFIG_SCHED_CORE_CTL 35 #include <linux/sched/core_ctl.h> 36 #endif 37 #include <pld_common.h> 38 #include <linux/pm.h> 39 40 /* Driver headers */ 41 #include <hif_napi.h> 42 #include <hif_debug.h> 43 #include <hif_io32.h> 44 #include <ce_api.h> 45 #include <ce_internal.h> 46 #include <hif_irq_affinity.h> 47 #include "qdf_cpuhp.h" 48 #include "qdf_module.h" 49 #include "qdf_net_if.h" 50 #include "qdf_dev.h" 51 #include "qdf_irq.h" 52 53 enum napi_decision_vector { 54 HIF_NAPI_NOEVENT = 0, 55 HIF_NAPI_INITED = 1, 56 HIF_NAPI_CONF_UP = 2 57 }; 58 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP) 59 60 #ifdef RECEIVE_OFFLOAD 61 /** 62 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI 63 * @napi: Rx_thread NAPI 64 * @budget: NAPI BUDGET 65 * 66 * Return: 0 as it is not supposed to be polled at all as it is not scheduled. 67 */ 68 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget) 69 { 70 hif_err("This napi_poll should not be polled as we don't schedule it"); 71 QDF_ASSERT(0); 72 return 0; 73 } 74 75 /** 76 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI 77 * @napii: Handle to napi_info holding rx_thread napi 78 * 79 * Return: None 80 */ 81 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 82 { 83 struct qdf_net_if *nd = (struct qdf_net_if *)&napii->rx_thread_netdev; 84 85 qdf_net_if_create_dummy_if(nd); 86 qdf_netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi, 87 hif_rxthread_napi_poll, 64); 88 qdf_napi_enable(&napii->rx_thread_napi); 89 } 90 91 /** 92 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI 93 * @napii: Handle to napi_info holding rx_thread napi 94 * 95 * Return: None 96 */ 97 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 98 { 99 qdf_netif_napi_del(&napii->rx_thread_napi); 100 } 101 #else /* RECEIVE_OFFLOAD */ 102 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 103 { 104 } 105 106 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 107 { 108 } 109 #endif 110 111 /** 112 * hif_napi_create() - creates the NAPI structures for a given CE 113 * @hif_ctx: pointer to hif context 114 * @poll: poll function to be used for this NAPI instance 115 * @budget: budget to be registered with the NAPI instance 116 * @scale: scale factor on the weight (to scaler budget to 1000) 117 * @flags: feature flags 118 * 119 * Description: 120 * Creates NAPI instances. This function is called 121 * unconditionally during initialization. It creates 122 * napi structures through the proper HTC/HIF calls. 123 * The structures are disabled on creation. 124 * Note that for each NAPI instance a separate dummy netdev is used 125 * 126 * Return: 127 * < 0: error 128 * = 0: <should never happen> 129 * > 0: id of the created object (for multi-NAPI, number of objects created) 130 */ 131 int hif_napi_create(struct hif_opaque_softc *hif_ctx, 132 int (*poll)(struct napi_struct *, int), 133 int budget, 134 int scale, 135 uint8_t flags) 136 { 137 int i; 138 struct qca_napi_data *napid; 139 struct qca_napi_info *napii; 140 struct CE_state *ce_state; 141 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 142 int rc = 0; 143 144 NAPI_DEBUG("-->(budget=%d, scale=%d)", 145 budget, scale); 146 NAPI_DEBUG("hif->napi_data.state = 0x%08x", 147 hif->napi_data.state); 148 NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x", 149 hif->napi_data.ce_map); 150 151 napid = &(hif->napi_data); 152 if (0 == (napid->state & HIF_NAPI_INITED)) { 153 memset(napid, 0, sizeof(struct qca_napi_data)); 154 qdf_spinlock_create(&(napid->lock)); 155 156 napid->state |= HIF_NAPI_INITED; 157 napid->flags = flags; 158 159 rc = hif_napi_cpu_init(hif_ctx); 160 if (rc != 0 && rc != -EALREADY) { 161 hif_err("NAPI_initialization failed(rc=%d)", rc); 162 rc = napid->ce_map; 163 goto hnc_err; 164 } else 165 rc = 0; 166 167 hif_debug("NAPI structures initialized, rc=%d", rc); 168 } 169 for (i = 0; i < hif->ce_count; i++) { 170 ce_state = hif->ce_id_to_state[i]; 171 NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d", 172 i, ce_state->htt_rx_data, 173 ce_state->htt_tx_data); 174 if (ce_srng_based(hif)) 175 continue; 176 177 if (!ce_state->htt_rx_data) 178 continue; 179 180 /* Now this is a CE where we need NAPI on */ 181 NAPI_DEBUG("Creating NAPI on pipe %d", i); 182 napii = qdf_mem_malloc(sizeof(*napii)); 183 napid->napis[i] = napii; 184 if (!napii) { 185 rc = -ENOMEM; 186 goto napii_free; 187 } 188 } 189 190 for (i = 0; i < hif->ce_count; i++) { 191 napii = napid->napis[i]; 192 if (!napii) 193 continue; 194 195 NAPI_DEBUG("initializing NAPI for pipe %d", i); 196 memset(napii, 0, sizeof(struct qca_napi_info)); 197 napii->scale = scale; 198 napii->id = NAPI_PIPE2ID(i); 199 napii->hif_ctx = hif_ctx; 200 napii->irq = pld_get_irq(hif->qdf_dev->dev, i); 201 202 if (napii->irq < 0) 203 hif_warn("bad IRQ value for CE %d: %d", i, napii->irq); 204 205 qdf_net_if_create_dummy_if((struct qdf_net_if *)&napii->netdev); 206 207 NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)", 208 &(napii->napi), &(napii->netdev), poll, budget); 209 qdf_netif_napi_add(&(napii->netdev), &(napii->napi), 210 poll, budget); 211 212 NAPI_DEBUG("after napi_add"); 213 NAPI_DEBUG("napi=0x%pK, netdev=0x%pK", 214 &(napii->napi), &(napii->netdev)); 215 NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK", 216 napii->napi.dev_list.prev, 217 napii->napi.dev_list.next); 218 NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK", 219 napii->netdev.napi_list.prev, 220 napii->netdev.napi_list.next); 221 222 hif_init_rx_thread_napi(napii); 223 napii->lro_ctx = qdf_lro_init(); 224 NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n", 225 i, napii->id, napii->lro_ctx); 226 227 /* It is OK to change the state variable below without 228 * protection as there should be no-one around yet 229 */ 230 napid->ce_map |= (0x01 << i); 231 hif_debug("NAPI id %d created for pipe %d", napii->id, i); 232 } 233 234 /* no ces registered with the napi */ 235 if (!ce_srng_based(hif) && napid->ce_map == 0) { 236 hif_warn("no napis created for copy engines"); 237 rc = -EFAULT; 238 goto napii_free; 239 } 240 241 NAPI_DEBUG("napi map = %x", napid->ce_map); 242 NAPI_DEBUG("NAPI ids created for all applicable pipes"); 243 return napid->ce_map; 244 245 napii_free: 246 for (i = 0; i < hif->ce_count; i++) { 247 napii = napid->napis[i]; 248 napid->napis[i] = NULL; 249 if (napii) 250 qdf_mem_free(napii); 251 } 252 253 hnc_err: 254 NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map); 255 return rc; 256 } 257 qdf_export_symbol(hif_napi_create); 258 259 #ifdef RECEIVE_OFFLOAD 260 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, 261 void (offld_flush_handler)(void *)) 262 { 263 int i; 264 struct CE_state *ce_state; 265 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 266 struct qca_napi_data *napid; 267 struct qca_napi_info *napii; 268 269 if (!scn) { 270 hif_err("hif_state NULL!"); 271 QDF_ASSERT(0); 272 return; 273 } 274 275 napid = hif_napi_get_all(hif_hdl); 276 for (i = 0; i < scn->ce_count; i++) { 277 ce_state = scn->ce_id_to_state[i]; 278 if (ce_state && (ce_state->htt_rx_data)) { 279 napii = napid->napis[i]; 280 napii->offld_flush_cb = offld_flush_handler; 281 hif_debug("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK", 282 i, napii->id, napii->offld_flush_cb); 283 } 284 } 285 } 286 287 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl) 288 { 289 int i; 290 struct CE_state *ce_state; 291 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 292 struct qca_napi_data *napid; 293 struct qca_napi_info *napii; 294 295 if (!scn) { 296 hif_err("hif_state NULL!"); 297 QDF_ASSERT(0); 298 return; 299 } 300 301 napid = hif_napi_get_all(hif_hdl); 302 for (i = 0; i < scn->ce_count; i++) { 303 ce_state = scn->ce_id_to_state[i]; 304 if (ce_state && (ce_state->htt_rx_data)) { 305 napii = napid->napis[i]; 306 hif_debug("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK", 307 i, napii->id, napii->offld_flush_cb); 308 /* Not required */ 309 napii->offld_flush_cb = NULL; 310 } 311 } 312 } 313 #endif /* RECEIVE_OFFLOAD */ 314 315 /** 316 * hif_napi_destroy() - destroys the NAPI structures for a given instance 317 * @hif_ctx: pointer to hif context 318 * @id: the CE id whose napi instance will be destroyed 319 * @force: if set, will destroy even if entry is active (de-activates) 320 * 321 * Description: 322 * Destroy a given NAPI instance. This function is called 323 * unconditionally during cleanup. 324 * Refuses to destroy an entry of it is still enabled (unless force=1) 325 * Marks the whole napi_data invalid if all instances are destroyed. 326 * 327 * Return: 328 * -EINVAL: specific entry has not been created 329 * -EPERM : specific entry is still active 330 * 0 < : error 331 * 0 = : success 332 */ 333 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx, 334 uint8_t id, 335 int force) 336 { 337 uint8_t ce = NAPI_ID2PIPE(id); 338 int rc = 0; 339 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 340 341 NAPI_DEBUG("-->(id=%d, force=%d)", id, force); 342 343 if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) { 344 hif_err("NAPI not initialized or entry %d not created", id); 345 rc = -EINVAL; 346 } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) { 347 hif_err("NAPI instance %d (pipe %d) not created", id, ce); 348 if (hif->napi_data.napis[ce]) 349 hif_err("memory allocated but ce_map not set %d (pipe %d)", 350 id, ce); 351 rc = -EINVAL; 352 } else { 353 struct qca_napi_data *napid; 354 struct qca_napi_info *napii; 355 356 napid = &(hif->napi_data); 357 napii = napid->napis[ce]; 358 if (!napii) { 359 if (napid->ce_map & (0x01 << ce)) 360 hif_err("napii & ce_map out of sync(ce %d)", ce); 361 return -EINVAL; 362 } 363 364 365 if (hif->napi_data.state == HIF_NAPI_CONF_UP) { 366 if (force) { 367 qdf_napi_disable(&(napii->napi)); 368 hif_debug("NAPI entry %d force disabled", id); 369 NAPI_DEBUG("NAPI %d force disabled", id); 370 } else { 371 hif_err("Cannot destroy active NAPI %d", id); 372 rc = -EPERM; 373 } 374 } 375 if (0 == rc) { 376 NAPI_DEBUG("before napi_del"); 377 NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK", 378 napii->napi.dev_list.prev, 379 napii->napi.dev_list.next); 380 NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK", 381 napii->netdev.napi_list.prev, 382 napii->netdev.napi_list.next); 383 384 qdf_lro_deinit(napii->lro_ctx); 385 qdf_netif_napi_del(&(napii->napi)); 386 hif_deinit_rx_thread_napi(napii); 387 388 napid->ce_map &= ~(0x01 << ce); 389 napid->napis[ce] = NULL; 390 napii->scale = 0; 391 qdf_mem_free(napii); 392 hif_debug("NAPI %d destroyed", id); 393 394 /* if there are no active instances and 395 * if they are all destroyed, 396 * set the whole structure to uninitialized state 397 */ 398 if (napid->ce_map == 0) { 399 rc = hif_napi_cpu_deinit(hif_ctx); 400 /* caller is tolerant to receiving !=0 rc */ 401 402 qdf_spinlock_destroy(&(napid->lock)); 403 memset(napid, 404 0, sizeof(struct qca_napi_data)); 405 hif_debug("no NAPI instances. Zapped"); 406 } 407 } 408 } 409 410 return rc; 411 } 412 qdf_export_symbol(hif_napi_destroy); 413 414 #ifdef FEATURE_LRO 415 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id) 416 { 417 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 418 struct qca_napi_data *napid; 419 struct qca_napi_info *napii; 420 421 napid = &(scn->napi_data); 422 napii = napid->napis[NAPI_ID2PIPE(napi_id)]; 423 424 if (napii) 425 return napii->lro_ctx; 426 return 0; 427 } 428 #endif 429 430 /** 431 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure 432 * @hif_ctx: pointer to hif context 433 * 434 * Description: 435 * Returns the address of the whole structure 436 * 437 * Return: 438 * <addr>: address of the whole HIF NAPI structure 439 */ 440 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx) 441 { 442 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 443 444 return &(hif->napi_data); 445 } 446 447 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid) 448 { 449 int id = NAPI_ID2PIPE(napi_id); 450 451 return napid->napis[id]; 452 } 453 454 /** 455 * hif_napi_event() - reacts to events that impact NAPI 456 * @hif_ctx: pointer to hif context 457 * @event: event that has been detected 458 * @data: more data regarding the event 459 * 460 * Description: 461 * This function handles two types of events: 462 * 1- Events that change the state of NAPI (enabled/disabled): 463 * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} 464 * The state is retrievable by "hdd_napi_enabled(-1)" 465 * - NAPI will be on if either INI file is on and it has not been disabled 466 * by a subsequent vendor CMD, 467 * or it has been enabled by a vendor CMD. 468 * 2- Events that change the CPU affinity of a NAPI instance/IRQ: 469 * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} 470 * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode 471 * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() 472 * - In LO tput mode, NAPI will yield control if its interrupts to the system 473 * management functions. However in HI throughput mode, NAPI will actively 474 * manage its interrupts/instances (by trying to disperse them out to 475 * separate performance cores). 476 * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. 477 * 478 * + In some cases (roaming peer management is the only case so far), a 479 * a client can trigger a "SERIALIZE" event. Basically, this means that the 480 * users is asking NAPI to go into a truly single execution context state. 481 * So, NAPI indicates to msm-irqbalancer that it wants to be denylisted, 482 * (if called for the first time) and then moves all IRQs (for NAPI 483 * instances) to be collapsed to a single core. If called multiple times, 484 * it will just re-collapse the CPUs. This is because denylist-on() API 485 * is reference-counted, and because the API has already been called. 486 * 487 * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go 488 * to its "normal" operation. Optionally, they can give a timeout value (in 489 * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this 490 * case, NAPI will just set the current throughput state to uninitialized 491 * and set the delay period. Once policy handler is called, it would skip 492 * applying the policy delay period times, and otherwise apply the policy. 493 * 494 * Return: 495 * < 0: some error 496 * = 0: event handled successfully 497 */ 498 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event, 499 void *data) 500 { 501 int rc = 0; 502 uint32_t prev_state; 503 int i; 504 bool state_changed; 505 struct napi_struct *napi; 506 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 507 struct qca_napi_data *napid = &(hif->napi_data); 508 enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED; 509 enum { 510 DENYLIST_NOT_PENDING, 511 DENYLIST_ON_PENDING, 512 DENYLIST_OFF_PENDING 513 } denylist_pending = DENYLIST_NOT_PENDING; 514 515 NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data); 516 517 if (ce_srng_based(hif)) 518 return hif_exec_event(hif_ctx, event, data); 519 520 if ((napid->state & HIF_NAPI_INITED) == 0) { 521 NAPI_DEBUG("%s: got event when NAPI not initialized", 522 __func__); 523 return -EINVAL; 524 } 525 qdf_spin_lock_bh(&(napid->lock)); 526 prev_state = napid->state; 527 switch (event) { 528 case NAPI_EVT_INI_FILE: 529 case NAPI_EVT_CMD_STATE: 530 case NAPI_EVT_INT_STATE: { 531 int on = (data != ((void *)0)); 532 533 hif_debug("recved evnt: STATE_CMD %d; v = %d (state=0x%0x)", 534 event, on, prev_state); 535 if (on) 536 if (prev_state & HIF_NAPI_CONF_UP) { 537 hif_debug("Duplicate NAPI conf ON msg"); 538 } else { 539 hif_debug("Setting state to ON"); 540 napid->state |= HIF_NAPI_CONF_UP; 541 } 542 else /* off request */ 543 if (prev_state & HIF_NAPI_CONF_UP) { 544 hif_debug("Setting state to OFF"); 545 napid->state &= ~HIF_NAPI_CONF_UP; 546 } else { 547 hif_debug("Duplicate NAPI conf OFF msg"); 548 } 549 break; 550 } 551 /* case NAPI_INIT_FILE/CMD_STATE */ 552 553 case NAPI_EVT_CPU_STATE: { 554 int cpu = ((unsigned long int)data >> 16); 555 int val = ((unsigned long int)data & 0x0ff); 556 557 NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d", 558 __func__, cpu, val); 559 560 /* state has already been set by hnc_cpu_notify_cb */ 561 if ((val == QCA_NAPI_CPU_DOWN) && 562 (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */ 563 (napid->napi_cpu[cpu].napis != 0)) { 564 NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d", 565 __func__, cpu); 566 rc = hif_napi_cpu_migrate(napid, 567 cpu, 568 HNC_ACT_RELOCATE); 569 napid->napi_cpu[cpu].napis = 0; 570 } 571 /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */ 572 break; 573 } 574 575 case NAPI_EVT_TPUT_STATE: { 576 tput_mode = (enum qca_napi_tput_state)data; 577 if (tput_mode == QCA_NAPI_TPUT_LO) { 578 /* from TPUT_HI -> TPUT_LO */ 579 NAPI_DEBUG("%s: Moving to napi_tput_LO state", 580 __func__); 581 denylist_pending = DENYLIST_OFF_PENDING; 582 /* 583 * Ideally we should "collapse" interrupts here, since 584 * we are "dispersing" interrupts in the "else" case. 585 * This allows the possibility that our interrupts may 586 * still be on the perf cluster the next time we enter 587 * high tput mode. However, the irq_balancer is free 588 * to move our interrupts to power cluster once 589 * denylisting has been turned off in the "else" case. 590 */ 591 } else { 592 /* from TPUT_LO -> TPUT->HI */ 593 NAPI_DEBUG("%s: Moving to napi_tput_HI state", 594 __func__); 595 rc = hif_napi_cpu_migrate(napid, 596 HNC_ANY_CPU, 597 HNC_ACT_DISPERSE); 598 599 denylist_pending = DENYLIST_ON_PENDING; 600 } 601 napid->napi_mode = tput_mode; 602 break; 603 } 604 605 case NAPI_EVT_USR_SERIAL: { 606 unsigned long users = (unsigned long)data; 607 608 NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld", 609 __func__, users); 610 611 rc = hif_napi_cpu_migrate(napid, 612 HNC_ANY_CPU, 613 HNC_ACT_COLLAPSE); 614 if ((users == 0) && (rc == 0)) 615 denylist_pending = DENYLIST_ON_PENDING; 616 break; 617 } 618 case NAPI_EVT_USR_NORMAL: { 619 NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__); 620 if (!napid->user_cpu_affin_mask) 621 denylist_pending = DENYLIST_OFF_PENDING; 622 /* 623 * Deserialization timeout is handled at hdd layer; 624 * just mark current mode to uninitialized to ensure 625 * it will be set when the delay is over 626 */ 627 napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED; 628 break; 629 } 630 default: { 631 hif_err("Unknown event: %d (data=0x%0lx)", 632 event, (unsigned long) data); 633 break; 634 } /* default */ 635 }; /* switch */ 636 637 638 switch (denylist_pending) { 639 case DENYLIST_ON_PENDING: 640 /* assume the control of WLAN IRQs */ 641 hif_napi_cpu_denylist(napid, DENYLIST_ON); 642 break; 643 case DENYLIST_OFF_PENDING: 644 /* yield the control of WLAN IRQs */ 645 hif_napi_cpu_denylist(napid, DENYLIST_OFF); 646 break; 647 default: /* nothing to do */ 648 break; 649 } /* switch denylist_pending */ 650 651 /* we want to perform the comparison in lock: 652 * there is a possibility of hif_napi_event get called 653 * from two different contexts (driver unload and cpu hotplug 654 * notification) and napid->state get changed 655 * in driver unload context and can lead to race condition 656 * in cpu hotplug context. Therefore, perform the napid->state 657 * comparison before releasing lock. 658 */ 659 state_changed = (prev_state != napid->state); 660 qdf_spin_unlock_bh(&(napid->lock)); 661 662 if (state_changed) { 663 if (napid->state == ENABLE_NAPI_MASK) { 664 rc = 1; 665 for (i = 0; i < CE_COUNT_MAX; i++) { 666 struct qca_napi_info *napii = napid->napis[i]; 667 if (napii) { 668 napi = &(napii->napi); 669 NAPI_DEBUG("%s: enabling NAPI %d", 670 __func__, i); 671 qdf_napi_enable(napi); 672 } 673 } 674 } else { 675 rc = 0; 676 for (i = 0; i < CE_COUNT_MAX; i++) { 677 struct qca_napi_info *napii = napid->napis[i]; 678 if (napii) { 679 napi = &(napii->napi); 680 NAPI_DEBUG("%s: disabling NAPI %d", 681 __func__, i); 682 qdf_napi_disable(napi); 683 /* in case it is affined, remove it */ 684 qdf_dev_set_irq_affinity(napii->irq, 685 NULL); 686 } 687 } 688 } 689 } else { 690 hif_debug("no change in hif napi state (still %d)", prev_state); 691 } 692 693 NAPI_DEBUG("<--[rc=%d]", rc); 694 return rc; 695 } 696 qdf_export_symbol(hif_napi_event); 697 698 /** 699 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not 700 * @hif_ctx: hif context 701 * @ce: CE instance (or -1, to check if any CEs are enabled) 702 * 703 * Return: bool 704 */ 705 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce) 706 { 707 int rc; 708 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 709 710 if (-1 == ce) 711 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK)); 712 else 713 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) && 714 (hif->napi_data.ce_map & (0x01 << ce))); 715 return rc; 716 } 717 qdf_export_symbol(hif_napi_enabled); 718 719 /** 720 * hif_napi_created() - checks whether NAPI is created for given ce or not 721 * @hif_ctx: hif context 722 * @ce: CE instance 723 * 724 * Return: bool 725 */ 726 bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce) 727 { 728 int rc; 729 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 730 731 rc = (hif->napi_data.ce_map & (0x01 << ce)); 732 733 return !!rc; 734 } 735 qdf_export_symbol(hif_napi_created); 736 737 /** 738 * hif_napi_enable_irq() - enables bus interrupts after napi_complete 739 * 740 * @hif: hif context 741 * @id: id of NAPI instance calling this (used to determine the CE) 742 * 743 * Return: void 744 */ 745 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) 746 { 747 struct hif_softc *scn = HIF_GET_SOFTC(hif); 748 749 hif_irq_enable(scn, NAPI_ID2PIPE(id)); 750 } 751 752 753 /** 754 * hif_napi_schedule() - schedules napi, updates stats 755 * @hif_ctx: hif context 756 * @ce_id: index of napi instance 757 * 758 * Return: false if napi didn't enable or already scheduled, otherwise true 759 */ 760 bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id) 761 { 762 int cpu = smp_processor_id(); 763 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 764 struct qca_napi_info *napii; 765 766 napii = scn->napi_data.napis[ce_id]; 767 if (qdf_unlikely(!napii)) { 768 hif_err("scheduling unallocated napi (ce:%d)", ce_id); 769 qdf_atomic_dec(&scn->active_tasklet_cnt); 770 return false; 771 } 772 773 if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) { 774 NAPI_DEBUG("napi scheduled, return"); 775 qdf_atomic_dec(&scn->active_tasklet_cnt); 776 return false; 777 } 778 779 hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE, 780 NULL, NULL, 0, 0); 781 napii->stats[cpu].napi_schedules++; 782 NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id); 783 napi_schedule(&(napii->napi)); 784 785 return true; 786 } 787 qdf_export_symbol(hif_napi_schedule); 788 789 /** 790 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed 791 * @napi_info: pointer to qca_napi_info for the napi instance 792 * 793 * Return: true => interrupt already on correct cpu, no correction needed 794 * false => interrupt on wrong cpu, correction done for cpu affinity 795 * of the interrupt 796 */ 797 static inline 798 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info) 799 { 800 bool right_cpu = true; 801 int rc = 0; 802 int cpu; 803 struct qca_napi_data *napid; 804 QDF_STATUS ret; 805 806 napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx)); 807 808 if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) { 809 810 cpu = qdf_get_cpu(); 811 if (unlikely((hif_napi_cpu_denylist(napid, 812 DENYLIST_QUERY) > 0) && 813 cpu != napi_info->cpu)) { 814 right_cpu = false; 815 816 NAPI_DEBUG("interrupt on wrong CPU, correcting"); 817 napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu); 818 819 qdf_dev_modify_irq_status(napi_info->irq, 820 QDF_IRQ_NO_BALANCING, 0); 821 ret = qdf_dev_set_irq_affinity(napi_info->irq, 822 (struct qdf_cpu_mask *) 823 &napi_info->cpumask); 824 rc = qdf_status_to_os_return(ret); 825 qdf_dev_modify_irq_status(napi_info->irq, 0, 826 QDF_IRQ_NO_BALANCING); 827 828 if (rc) 829 hif_err("Setting irq affinity hint: %d", rc); 830 else 831 napi_info->stats[cpu].cpu_corrected++; 832 } 833 } 834 return right_cpu; 835 } 836 837 #ifdef RECEIVE_OFFLOAD 838 /** 839 * hif_napi_offld_flush_cb() - Call upper layer flush callback 840 * @napi_info: Handle to hif_napi_info 841 * 842 * Return: None 843 */ 844 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) 845 { 846 if (napi_info->offld_flush_cb) 847 napi_info->offld_flush_cb(napi_info); 848 } 849 #else 850 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) 851 { 852 } 853 #endif 854 855 /** 856 * hif_napi_poll() - NAPI poll routine 857 * @hif_ctx: HIF context 858 * @napi: pointer to NAPI struct as kernel holds it 859 * @budget: 860 * 861 * This is the body of the poll function. 862 * The poll function is called by kernel. So, there is a wrapper 863 * function in HDD, which in turn calls this function. 864 * Two main reasons why the whole thing is not implemented in HDD: 865 * a) references to things like ce_service that HDD is not aware of 866 * b) proximity to the implementation of ce_tasklet, which the body 867 * of this function should be very close to. 868 * 869 * NOTE TO THE MAINTAINER: 870 * Consider this function and ce_tasklet very tightly coupled pairs. 871 * Any changes to ce_tasklet or this function may likely need to be 872 * reflected in the counterpart. 873 * 874 * Returns: 875 * int: the amount of work done in this poll (<= budget) 876 */ 877 int hif_napi_poll(struct hif_opaque_softc *hif_ctx, 878 struct napi_struct *napi, 879 int budget) 880 { 881 int rc = 0; /* default: no work done, also takes care of error */ 882 int normalized = 0; 883 int bucket; 884 int cpu = smp_processor_id(); 885 bool poll_on_right_cpu; 886 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 887 struct qca_napi_info *napi_info; 888 struct CE_state *ce_state = NULL; 889 890 if (unlikely(!hif)) { 891 hif_err("hif context is NULL"); 892 QDF_ASSERT(0); 893 goto out; 894 } 895 896 napi_info = (struct qca_napi_info *) 897 container_of(napi, struct qca_napi_info, napi); 898 899 NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)", 900 __func__, napi_info->id, napi_info->irq, budget); 901 902 napi_info->stats[cpu].napi_polls++; 903 904 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), 905 NAPI_POLL_ENTER, NULL, NULL, cpu, 0); 906 907 rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id)); 908 NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs", 909 __func__, rc); 910 911 hif_napi_offld_flush_cb(napi_info); 912 913 /* do not return 0, if there was some work done, 914 * even if it is below the scale 915 */ 916 if (rc) { 917 napi_info->stats[cpu].napi_workdone += rc; 918 normalized = (rc / napi_info->scale); 919 if (normalized == 0) 920 normalized++; 921 bucket = (normalized - 1) / 922 (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS); 923 if (bucket >= QCA_NAPI_NUM_BUCKETS) { 924 bucket = QCA_NAPI_NUM_BUCKETS - 1; 925 hif_err("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)" 926 " normalized %d, napi budget %d", 927 bucket, QCA_NAPI_NUM_BUCKETS, 928 normalized, QCA_NAPI_BUDGET); 929 } 930 napi_info->stats[cpu].napi_budget_uses[bucket]++; 931 } else { 932 /* if ce_per engine reports 0, then poll should be terminated */ 933 NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI", 934 __func__, __LINE__); 935 } 936 937 ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)]; 938 939 /* 940 * Not using the API hif_napi_correct_cpu directly in the if statement 941 * below since the API may not get evaluated if put at the end if any 942 * prior condition would evaluate to be true. The CPU correction 943 * check should kick in every poll. 944 */ 945 #ifdef NAPI_YIELD_BUDGET_BASED 946 if (ce_state && (ce_state->force_break || 0 == rc)) { 947 #else 948 poll_on_right_cpu = hif_napi_correct_cpu(napi_info); 949 if ((ce_state) && 950 (!ce_check_rx_pending(ce_state) || (0 == rc) || 951 !poll_on_right_cpu)) { 952 #endif 953 napi_info->stats[cpu].napi_completes++; 954 #ifdef NAPI_YIELD_BUDGET_BASED 955 ce_state->force_break = 0; 956 #endif 957 958 hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE, 959 NULL, NULL, 0, 0); 960 if (normalized >= budget) 961 normalized = budget - 1; 962 963 napi_complete(napi); 964 /* enable interrupts */ 965 hif_napi_enable_irq(hif_ctx, napi_info->id); 966 /* support suspend/resume */ 967 qdf_atomic_dec(&(hif->active_tasklet_cnt)); 968 969 NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts", 970 __func__, __LINE__); 971 } else { 972 /* 4.4 kernel NAPI implementation requires drivers to 973 * return full work when they ask to be re-scheduled, 974 * or napi_complete and re-start with a fresh interrupt 975 */ 976 normalized = budget; 977 } 978 979 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), 980 NAPI_POLL_EXIT, NULL, NULL, normalized, 0); 981 982 NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized); 983 return normalized; 984 out: 985 return rc; 986 } 987 qdf_export_symbol(hif_napi_poll); 988 989 void hif_update_napi_max_poll_time(struct CE_state *ce_state, 990 int ce_id, 991 int cpu_id) 992 { 993 struct hif_softc *hif; 994 struct qca_napi_info *napi_info; 995 unsigned long long napi_poll_time = qdf_time_sched_clock() - 996 ce_state->ce_service_start_time; 997 998 hif = ce_state->scn; 999 napi_info = hif->napi_data.napis[ce_id]; 1000 if (napi_poll_time > 1001 napi_info->stats[cpu_id].napi_max_poll_time) 1002 napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time; 1003 } 1004 qdf_export_symbol(hif_update_napi_max_poll_time); 1005 1006 #ifdef HIF_IRQ_AFFINITY 1007 /** 1008 * hif_napi_update_yield_stats() - update NAPI yield related stats 1009 * @ce_state: CE state structure 1010 * @time_limit_reached: indicates whether the time limit was reached 1011 * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached 1012 * 1013 * Return: None 1014 */ 1015 void hif_napi_update_yield_stats(struct CE_state *ce_state, 1016 bool time_limit_reached, 1017 bool rxpkt_thresh_reached) 1018 { 1019 struct hif_softc *hif; 1020 struct qca_napi_data *napi_data = NULL; 1021 int ce_id = 0; 1022 int cpu_id = 0; 1023 1024 if (unlikely(!ce_state)) { 1025 QDF_ASSERT(ce_state); 1026 return; 1027 } 1028 1029 hif = ce_state->scn; 1030 1031 if (unlikely(!hif)) { 1032 QDF_ASSERT(hif); 1033 return; 1034 } 1035 napi_data = &(hif->napi_data); 1036 if (unlikely(!napi_data)) { 1037 QDF_ASSERT(napi_data); 1038 return; 1039 } 1040 1041 ce_id = ce_state->id; 1042 cpu_id = qdf_get_cpu(); 1043 1044 if (unlikely(!napi_data->napis[ce_id])) { 1045 return; 1046 } 1047 1048 if (time_limit_reached) 1049 napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++; 1050 else 1051 napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++; 1052 1053 hif_update_napi_max_poll_time(ce_state, ce_id, 1054 cpu_id); 1055 } 1056 1057 /** 1058 * hif_napi_stats() - display NAPI CPU statistics 1059 * @napid: pointer to qca_napi_data 1060 * 1061 * Description: 1062 * Prints the various CPU cores on which the NAPI instances /CEs interrupts 1063 * are being executed. Can be called from outside NAPI layer. 1064 * 1065 * Return: None 1066 */ 1067 void hif_napi_stats(struct qca_napi_data *napid) 1068 { 1069 int i; 1070 struct qca_napi_cpu *cpu; 1071 1072 if (!napid) { 1073 qdf_debug("%s: napiid struct is null", __func__); 1074 return; 1075 } 1076 1077 cpu = napid->napi_cpu; 1078 qdf_debug("NAPI CPU TABLE"); 1079 qdf_debug("lilclhead=%d, bigclhead=%d", 1080 napid->lilcl_head, napid->bigcl_head); 1081 for (i = 0; i < NR_CPUS; i++) { 1082 qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d", 1083 i, 1084 cpu[i].state, cpu[i].core_id, cpu[i].cluster_id, 1085 cpu[i].core_mask.bits[0], 1086 cpu[i].thread_mask.bits[0], 1087 cpu[i].max_freq, cpu[i].napis, 1088 cpu[i].cluster_nxt); 1089 } 1090 } 1091 1092 #ifdef FEATURE_NAPI_DEBUG 1093 /* 1094 * Local functions 1095 * - no argument checks, all internal/trusted callers 1096 */ 1097 static void hnc_dump_cpus(struct qca_napi_data *napid) 1098 { 1099 hif_napi_stats(napid); 1100 } 1101 #else 1102 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; 1103 #endif /* FEATURE_NAPI_DEBUG */ 1104 1105 #define HNC_MIN_CLUSTER 0 1106 #define HNC_MAX_CLUSTER 1 1107 1108 /** 1109 * hnc_link_clusters() - partitions to cpu table into clusters 1110 * @napid: pointer to NAPI data 1111 * 1112 * Takes in a CPU topology table and builds two linked lists 1113 * (big cluster cores, list-head at bigcl_head, and little cluster 1114 * cores, list-head at lilcl_head) out of it. 1115 * 1116 * If there are more than two clusters: 1117 * - bigcl_head and lilcl_head will be different, 1118 * - the cluster with highest cpufreq will be considered the "big" cluster. 1119 * If there are more than one with the highest frequency, the *last* of such 1120 * clusters will be designated as the "big cluster" 1121 * - the cluster with lowest cpufreq will be considered the "li'l" cluster. 1122 * If there are more than one clusters with the lowest cpu freq, the *first* 1123 * of such clusters will be designated as the "little cluster" 1124 * - We only support up to 32 clusters 1125 * Return: 0 : OK 1126 * !0: error (at least one of lil/big clusters could not be found) 1127 */ 1128 static int hnc_link_clusters(struct qca_napi_data *napid) 1129 { 1130 int rc = 0; 1131 1132 int i; 1133 int it = 0; 1134 uint32_t cl_done = 0x0; 1135 int cl, curcl, curclhead = 0; 1136 int more; 1137 unsigned int lilfrq = INT_MAX; 1138 unsigned int bigfrq = 0; 1139 unsigned int clfrq = 0; 1140 int prev = 0; 1141 struct qca_napi_cpu *cpus = napid->napi_cpu; 1142 1143 napid->lilcl_head = napid->bigcl_head = -1; 1144 1145 do { 1146 more = 0; 1147 it++; curcl = -1; 1148 for (i = 0; i < NR_CPUS; i++) { 1149 cl = cpus[i].cluster_id; 1150 NAPI_DEBUG("Processing cpu[%d], cluster=%d\n", 1151 i, cl); 1152 if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) { 1153 NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl); 1154 /* continue if ASSERTs are disabled */ 1155 continue; 1156 }; 1157 if (cpumask_weight(&(cpus[i].core_mask)) == 0) { 1158 NAPI_DEBUG("Core mask 0. SKIPPED\n"); 1159 continue; 1160 } 1161 if (cl_done & (0x01 << cl)) { 1162 NAPI_DEBUG("Cluster already processed. SKIPPED\n"); 1163 continue; 1164 } else { 1165 if (more == 0) { 1166 more = 1; 1167 curcl = cl; 1168 curclhead = i; /* row */ 1169 clfrq = cpus[i].max_freq; 1170 prev = -1; 1171 }; 1172 if ((curcl >= 0) && (curcl != cl)) { 1173 NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n", 1174 cl, curcl); 1175 continue; 1176 } 1177 if (cpus[i].max_freq != clfrq) 1178 NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n", 1179 cpus[i].max_freq, clfrq); 1180 if (clfrq >= bigfrq) { 1181 bigfrq = clfrq; 1182 napid->bigcl_head = curclhead; 1183 NAPI_DEBUG("bigcl=%d\n", curclhead); 1184 } 1185 if (clfrq < lilfrq) { 1186 lilfrq = clfrq; 1187 napid->lilcl_head = curclhead; 1188 NAPI_DEBUG("lilcl=%d\n", curclhead); 1189 } 1190 if (prev != -1) 1191 cpus[prev].cluster_nxt = i; 1192 1193 prev = i; 1194 } 1195 } 1196 if (curcl >= 0) 1197 cl_done |= (0x01 << curcl); 1198 1199 } while (more); 1200 1201 if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0))) 1202 rc = -EFAULT; 1203 1204 hnc_dump_cpus(napid); /* if NAPI_DEBUG */ 1205 return rc; 1206 } 1207 #undef HNC_MIN_CLUSTER 1208 #undef HNC_MAX_CLUSTER 1209 1210 /* 1211 * hotplug function group 1212 */ 1213 1214 /** 1215 * hnc_cpu_online_cb() - handles CPU hotplug "up" events 1216 * @context: the associated HIF context 1217 * @cpu: the CPU Id of the CPU the event happened on 1218 * 1219 * Return: None 1220 */ 1221 static void hnc_cpu_online_cb(void *context, uint32_t cpu) 1222 { 1223 struct hif_softc *hif = context; 1224 struct qca_napi_data *napid = &hif->napi_data; 1225 1226 if (cpu >= NR_CPUS) 1227 return; 1228 1229 NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu); 1230 1231 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP; 1232 NAPI_DEBUG("%s: CPU %u marked %d", 1233 __func__, cpu, napid->napi_cpu[cpu].state); 1234 1235 NAPI_DEBUG("<--%s", __func__); 1236 } 1237 1238 /** 1239 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events 1240 * @context: the associated HIF context 1241 * @cpu: the CPU Id of the CPU the event happened on 1242 * 1243 * On transition to offline, we act on PREP events, because we may need to move 1244 * the irqs/NAPIs to another CPU before it is actually off-lined. 1245 * 1246 * Return: None 1247 */ 1248 static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu) 1249 { 1250 struct hif_softc *hif = context; 1251 struct qca_napi_data *napid = &hif->napi_data; 1252 1253 if (cpu >= NR_CPUS) 1254 return; 1255 1256 NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu); 1257 1258 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN; 1259 1260 NAPI_DEBUG("%s: CPU %u marked %d; updating affinity", 1261 __func__, cpu, napid->napi_cpu[cpu].state); 1262 1263 /** 1264 * we need to move any NAPIs on this CPU out. 1265 * if we are in LO throughput mode, then this is valid 1266 * if the CPU is the the low designated CPU. 1267 */ 1268 hif_napi_event(GET_HIF_OPAQUE_HDL(hif), 1269 NAPI_EVT_CPU_STATE, 1270 (void *) 1271 ((size_t)cpu << 16 | napid->napi_cpu[cpu].state)); 1272 1273 NAPI_DEBUG("<--%s", __func__); 1274 } 1275 1276 static int hnc_hotplug_register(struct hif_softc *hif_sc) 1277 { 1278 QDF_STATUS status; 1279 1280 NAPI_DEBUG("-->%s", __func__); 1281 1282 status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler, 1283 hif_sc, 1284 hnc_cpu_online_cb, 1285 hnc_cpu_before_offline_cb); 1286 1287 NAPI_DEBUG("<--%s [%d]", __func__, status); 1288 1289 return qdf_status_to_os_return(status); 1290 } 1291 1292 static void hnc_hotplug_unregister(struct hif_softc *hif_sc) 1293 { 1294 NAPI_DEBUG("-->%s", __func__); 1295 1296 if (hif_sc->napi_data.cpuhp_handler) 1297 qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler); 1298 1299 NAPI_DEBUG("<--%s", __func__); 1300 } 1301 1302 /** 1303 * hnc_tput_hook() - installs a callback in the throughput detector 1304 * @install: !0 => install; =0: uninstall 1305 * 1306 * installs a callback to be called when wifi driver throughput (tx+rx) 1307 * crosses a threshold. Currently, we are using the same criteria as 1308 * TCP ack suppression (500 packets/100ms by default). 1309 * 1310 * Return: 0 : success 1311 * <0: failure 1312 */ 1313 1314 static int hnc_tput_hook(int install) 1315 { 1316 int rc = 0; 1317 1318 /* 1319 * Nothing, until the bw_calculation accepts registration 1320 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk 1321 * hdd_napi_throughput_policy(...) 1322 */ 1323 return rc; 1324 } 1325 1326 /* 1327 * Implementation of hif_napi_cpu API 1328 */ 1329 1330 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) 1331 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) 1332 { 1333 cpumask_copy(&(cpus[i].thread_mask), 1334 topology_sibling_cpumask(i)); 1335 } 1336 #else 1337 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) 1338 { 1339 } 1340 #endif 1341 1342 1343 /** 1344 * hif_napi_cpu_init() - initialization of irq affinity block 1345 * @hif: HIF context 1346 * 1347 * called by hif_napi_create, after the first instance is called 1348 * - builds napi_rss_cpus table from cpu topology 1349 * - links cores of the same clusters together 1350 * - installs hot-plug notifier 1351 * - installs throughput trigger notifier (when such mechanism exists) 1352 * 1353 * Return: 0: OK 1354 * <0: error code 1355 */ 1356 int hif_napi_cpu_init(struct hif_opaque_softc *hif) 1357 { 1358 int rc = 0; 1359 int i; 1360 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; 1361 struct qca_napi_cpu *cpus = napid->napi_cpu; 1362 1363 NAPI_DEBUG("--> "); 1364 1365 if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) { 1366 NAPI_DEBUG("NAPI RSS table already initialized.\n"); 1367 rc = -EALREADY; 1368 goto lab_rss_init; 1369 } 1370 1371 /* build CPU topology table */ 1372 for_each_possible_cpu(i) { 1373 cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask) 1374 ? QCA_NAPI_CPU_UP 1375 : QCA_NAPI_CPU_DOWN)); 1376 cpus[i].core_id = topology_core_id(i); 1377 cpus[i].cluster_id = topology_physical_package_id(i); 1378 cpumask_copy(&(cpus[i].core_mask), 1379 topology_core_cpumask(i)); 1380 record_sibling_cpumask(cpus, i); 1381 cpus[i].max_freq = cpufreq_quick_get_max(i); 1382 cpus[i].napis = 0x0; 1383 cpus[i].cluster_nxt = -1; /* invalid */ 1384 } 1385 1386 /* link clusters together */ 1387 rc = hnc_link_clusters(napid); 1388 if (0 != rc) 1389 goto lab_err_topology; 1390 1391 /* install hotplug notifier */ 1392 rc = hnc_hotplug_register(HIF_GET_SOFTC(hif)); 1393 if (0 != rc) 1394 goto lab_err_hotplug; 1395 1396 /* install throughput notifier */ 1397 rc = hnc_tput_hook(1); 1398 if (0 == rc) 1399 goto lab_rss_init; 1400 1401 lab_err_hotplug: 1402 hnc_tput_hook(0); 1403 hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); 1404 lab_err_topology: 1405 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); 1406 lab_rss_init: 1407 NAPI_DEBUG("<-- [rc=%d]", rc); 1408 return rc; 1409 } 1410 1411 /** 1412 * hif_napi_cpu_deinit() - clean-up of irq affinity block 1413 * @hif: HIF context 1414 * 1415 * called by hif_napi_destroy, when the last instance is removed 1416 * - uninstalls throughput and hotplug notifiers 1417 * - clears cpu topology table 1418 * Return: 0: OK 1419 */ 1420 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) 1421 { 1422 int rc = 0; 1423 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; 1424 1425 NAPI_DEBUG("-->%s(...)", __func__); 1426 1427 /* uninstall tput notifier */ 1428 rc = hnc_tput_hook(0); 1429 1430 /* uninstall hotplug notifier */ 1431 hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); 1432 1433 /* clear the topology table */ 1434 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); 1435 1436 NAPI_DEBUG("<--%s[rc=%d]", __func__, rc); 1437 1438 return rc; 1439 } 1440 1441 /** 1442 * hncm_migrate_to() - migrates a NAPI to a CPU 1443 * @napid: pointer to NAPI block 1444 * @napi_ce: CE_id of the NAPI instance 1445 * @didx: index in the CPU topology table for the CPU to migrate to 1446 * 1447 * Migrates NAPI (identified by the CE_id) to the destination core 1448 * Updates the napi_map of the destination entry 1449 * 1450 * Return: 1451 * =0 : success 1452 * <0 : error 1453 */ 1454 static int hncm_migrate_to(struct qca_napi_data *napid, 1455 int napi_ce, 1456 int didx) 1457 { 1458 int rc = 0; 1459 QDF_STATUS status; 1460 1461 NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); 1462 1463 if (!napid->napis[napi_ce]) 1464 return -EINVAL; 1465 1466 napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx); 1467 1468 qdf_dev_modify_irq_status(napid->napis[napi_ce]->irq, 1469 QDF_IRQ_NO_BALANCING, 0); 1470 status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq, 1471 (struct qdf_cpu_mask *) 1472 &napid->napis[napi_ce]->cpumask); 1473 rc = qdf_status_to_os_return(status); 1474 1475 /* unmark the napis bitmap in the cpu table */ 1476 napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce); 1477 /* mark the napis bitmap for the new designated cpu */ 1478 napid->napi_cpu[didx].napis |= (0x01 << napi_ce); 1479 napid->napis[napi_ce]->cpu = didx; 1480 1481 NAPI_DEBUG("<--%s[%d]", __func__, rc); 1482 return rc; 1483 } 1484 /** 1485 * hncm_dest_cpu() - finds a destination CPU for NAPI 1486 * @napid: pointer to NAPI block 1487 * @act: RELOCATE | COLLAPSE | DISPERSE 1488 * 1489 * Finds the designated destination for the next IRQ. 1490 * RELOCATE: translated to either COLLAPSE or DISPERSE based 1491 * on napid->napi_mode (throughput state) 1492 * COLLAPSE: All have the same destination: the first online CPU in lilcl 1493 * DISPERSE: One of the CPU in bigcl, which has the smallest number of 1494 * NAPIs on it 1495 * 1496 * Return: >=0 : index in the cpu topology table 1497 * : < 0 : error 1498 */ 1499 static int hncm_dest_cpu(struct qca_napi_data *napid, int act) 1500 { 1501 int destidx = -1; 1502 int head, i; 1503 1504 NAPI_DEBUG("-->%s(act=%d)", __func__, act); 1505 if (act == HNC_ACT_RELOCATE) { 1506 if (napid->napi_mode == QCA_NAPI_TPUT_LO) 1507 act = HNC_ACT_COLLAPSE; 1508 else 1509 act = HNC_ACT_DISPERSE; 1510 NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", 1511 __func__, act); 1512 } 1513 if (act == HNC_ACT_COLLAPSE) { 1514 head = i = napid->lilcl_head; 1515 retry_collapse: 1516 while (i >= 0) { 1517 if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { 1518 destidx = i; 1519 break; 1520 } 1521 i = napid->napi_cpu[i].cluster_nxt; 1522 } 1523 if ((destidx < 0) && (head == napid->lilcl_head)) { 1524 NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", 1525 __func__); 1526 head = i = napid->bigcl_head; 1527 goto retry_collapse; 1528 } 1529 } else { /* HNC_ACT_DISPERSE */ 1530 int smallest = 99; /* all 32 bits full */ 1531 int smallidx = -1; 1532 1533 head = i = napid->bigcl_head; 1534 retry_disperse: 1535 while (i >= 0) { 1536 if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && 1537 (hweight32(napid->napi_cpu[i].napis) <= smallest)) { 1538 smallest = napid->napi_cpu[i].napis; 1539 smallidx = i; 1540 } 1541 i = napid->napi_cpu[i].cluster_nxt; 1542 } 1543 /* Check if matches with user specified CPU mask */ 1544 smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ? 1545 smallidx : -1; 1546 1547 if ((smallidx < 0) && (head == napid->bigcl_head)) { 1548 NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", 1549 __func__); 1550 head = i = napid->lilcl_head; 1551 goto retry_disperse; 1552 } 1553 destidx = smallidx; 1554 } 1555 NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); 1556 return destidx; 1557 } 1558 /** 1559 * hif_napi_cpu_migrate() - migrate IRQs away 1560 * @napid: pointer to NAPI block 1561 * @cpu: -1: all CPUs <n> specific CPU 1562 * @action: COLLAPSE | DISPERSE 1563 * 1564 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible 1565 * cores. Eligible cores are: 1566 * act=COLLAPSE -> the first online core of the little cluster 1567 * act=DISPERSE -> separate cores of the big cluster, so that each core will 1568 * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) 1569 * 1570 * Note that this function is called with a spinlock acquired already. 1571 * 1572 * Return: =0: success 1573 * <0: error 1574 */ 1575 1576 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) 1577 { 1578 int rc = 0; 1579 struct qca_napi_cpu *cpup; 1580 int i, dind; 1581 uint32_t napis; 1582 1583 NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", 1584 __func__, cpu, action); 1585 /* the following is really: hif_napi_enabled() with less overhead */ 1586 if (napid->ce_map == 0) { 1587 NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__); 1588 goto hncm_return; 1589 } 1590 1591 cpup = napid->napi_cpu; 1592 1593 switch (action) { 1594 case HNC_ACT_RELOCATE: 1595 case HNC_ACT_DISPERSE: 1596 case HNC_ACT_COLLAPSE: { 1597 /* first find the src napi set */ 1598 if (cpu == HNC_ANY_CPU) 1599 napis = napid->ce_map; 1600 else 1601 napis = cpup[cpu].napis; 1602 /* then clear the napi bitmap on each CPU */ 1603 for (i = 0; i < NR_CPUS; i++) 1604 cpup[i].napis = 0; 1605 /* then for each of the NAPIs to disperse: */ 1606 for (i = 0; i < CE_COUNT_MAX; i++) 1607 if (napis & (1 << i)) { 1608 /* find a destination CPU */ 1609 dind = hncm_dest_cpu(napid, action); 1610 if (dind >= 0) { 1611 NAPI_DEBUG("Migrating NAPI ce%d to %d", 1612 i, dind); 1613 rc = hncm_migrate_to(napid, i, dind); 1614 } else { 1615 NAPI_DEBUG("No dest for NAPI ce%d", i); 1616 hnc_dump_cpus(napid); 1617 rc = -1; 1618 } 1619 } 1620 break; 1621 } 1622 default: { 1623 NAPI_DEBUG("%s: bad action: %d\n", __func__, action); 1624 QDF_BUG(0); 1625 break; 1626 } 1627 } /* switch action */ 1628 1629 hncm_return: 1630 hnc_dump_cpus(napid); 1631 return rc; 1632 } 1633 1634 1635 /** 1636 * hif_napi_dl_irq() - calls irq_modify_status to enable/disable denylisting 1637 * @napid: pointer to qca_napi_data structure 1638 * @dl_flag: denylist flag to enable/disable denylisting 1639 * 1640 * The function enables/disables denylisting for all the copy engine 1641 * interrupts on which NAPI is enabled. 1642 * 1643 * Return: None 1644 */ 1645 static inline void hif_napi_dl_irq(struct qca_napi_data *napid, bool dl_flag) 1646 { 1647 int i; 1648 struct qca_napi_info *napii; 1649 1650 for (i = 0; i < CE_COUNT_MAX; i++) { 1651 /* check if NAPI is enabled on the CE */ 1652 if (!(napid->ce_map & (0x01 << i))) 1653 continue; 1654 1655 /*double check that NAPI is allocated for the CE */ 1656 napii = napid->napis[i]; 1657 if (!(napii)) 1658 continue; 1659 1660 if (dl_flag == true) 1661 qdf_dev_modify_irq_status(napii->irq, 1662 0, QDF_IRQ_NO_BALANCING); 1663 else 1664 qdf_dev_modify_irq_status(napii->irq, 1665 QDF_IRQ_NO_BALANCING, 0); 1666 hif_debug("dl_flag %d CE %d", dl_flag, i); 1667 } 1668 } 1669 1670 /** 1671 * hif_napi_cpu_denylist() - en(dis)ables denylisting for NAPI RX interrupts. 1672 * @napid: pointer to qca_napi_data structure 1673 * @op: denylist operation to perform 1674 * 1675 * The function enables/disables/queries denylisting for all CE RX 1676 * interrupts with NAPI enabled. Besides denylisting, it also enables/disables 1677 * core_ctl_set_boost. 1678 * Once denylisting is enabled, the interrupts will not be managed by the IRQ 1679 * balancer. 1680 * 1681 * Return: -EINVAL, in case IRQ_DENYLISTING and CORE_CTL_BOOST is not enabled 1682 * for DENYLIST_QUERY op - denylist refcount 1683 * for DENYLIST_ON op - return value from core_ctl_set_boost API 1684 * for DENYLIST_OFF op - return value from core_ctl_set_boost API 1685 */ 1686 int hif_napi_cpu_denylist(struct qca_napi_data *napid, 1687 enum qca_denylist_op op) 1688 { 1689 int rc = 0; 1690 static int ref_count; /* = 0 by the compiler */ 1691 uint8_t flags = napid->flags; 1692 bool dl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; 1693 bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; 1694 1695 NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); 1696 1697 if (!(dl_en && ccb_en)) { 1698 rc = -EINVAL; 1699 goto out; 1700 } 1701 1702 switch (op) { 1703 case DENYLIST_QUERY: 1704 rc = ref_count; 1705 break; 1706 case DENYLIST_ON: 1707 ref_count++; 1708 rc = 0; 1709 if (ref_count == 1) { 1710 rc = hif_napi_core_ctl_set_boost(true); 1711 NAPI_DEBUG("boost_on() returns %d - refcnt=%d", 1712 rc, ref_count); 1713 hif_napi_dl_irq(napid, true); 1714 } 1715 break; 1716 case DENYLIST_OFF: 1717 if (ref_count) { 1718 ref_count--; 1719 rc = 0; 1720 if (ref_count == 0) { 1721 rc = hif_napi_core_ctl_set_boost(false); 1722 NAPI_DEBUG("boost_off() returns %d - refcnt=%d", 1723 rc, ref_count); 1724 hif_napi_dl_irq(napid, false); 1725 } 1726 } 1727 break; 1728 default: 1729 NAPI_DEBUG("Invalid denylist op: %d", op); 1730 rc = -EINVAL; 1731 } /* switch */ 1732 out: 1733 NAPI_DEBUG("<--%s[%d]", __func__, rc); 1734 return rc; 1735 } 1736 1737 static unsigned long napi_serialize_reqs; 1738 /** 1739 * hif_napi_serialize() - [de-]serialize NAPI operations 1740 * @hif: context 1741 * @is_on: 1: serialize, 0: deserialize 1742 * 1743 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the 1744 * following steps (see hif_napi_event for code): 1745 * - put irqs of all NAPI instances on the same CPU 1746 * - only for the first serialize call: denylist 1747 * 1748 * hif_napi_serialize(hif, 0): 1749 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec) 1750 * - at the end of the timer, check the current throughput state and 1751 * implement it. 1752 */ 1753 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) 1754 { 1755 int rc = -EINVAL; 1756 1757 if (hif) 1758 switch (is_on) { 1759 case 0: { /* de-serialize */ 1760 rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL, 1761 (void *) 0); 1762 napi_serialize_reqs = 0; 1763 break; 1764 } /* end de-serialize */ 1765 case 1: { /* serialize */ 1766 rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL, 1767 (void *)napi_serialize_reqs++); 1768 break; 1769 } /* end serialize */ 1770 default: 1771 break; /* no-op */ 1772 } /* switch */ 1773 return rc; 1774 } 1775 1776 #endif /* ifdef HIF_IRQ_AFFINITY */ 1777