/*
 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: hif_napi.c
 *
 * HIF NAPI interface implementation
 */

#include <linux/string.h> /* memset */

/* Linux headers */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_SCHED_CORE_CTL
#include <linux/sched/core_ctl.h>
#endif
#include <pld_common.h>
#include <linux/pm.h>

/* Driver headers */
#include <hif_napi.h>
#include <hif_debug.h>
#include <hif_io32.h>
#include <ce_api.h>
#include <ce_internal.h>
#include <hif_irq_affinity.h>
#include "qdf_cpuhp.h"
#include "qdf_module.h"
#include "qdf_net_if.h"
#include "qdf_dev.h"

enum napi_decision_vector {
	HIF_NAPI_NOEVENT = 0,
	HIF_NAPI_INITED  = 1,
	HIF_NAPI_CONF_UP = 2
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
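
/*
 * Illustrative note (not driver code): NAPI is operational only when both
 * bits of the decision vector are set, i.e. when the structures have been
 * created (INITED) and configuration has turned NAPI on (CONF_UP):
 *
 *	state  = HIF_NAPI_INITED;	// created, still off
 *	state |= HIF_NAPI_CONF_UP;	// now state == ENABLE_NAPI_MASK: on
 *	state &= ~HIF_NAPI_CONF_UP;	// off again, but still INITED
 */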

#ifdef RECEIVE_OFFLOAD
/**
 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: rx_thread NAPI
 * @budget: NAPI budget
 *
 * Return: 0, as this poll routine is never expected to run; the NAPI is
 *	   never scheduled.
 */
static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
{
	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
	QDF_ASSERT(0);
	return 0;
}

/**
 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Return: None
 */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
	init_dummy_netdev(&napii->rx_thread_netdev);
	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
		       hif_rxthread_napi_poll, 64);
	napi_enable(&napii->rx_thread_napi);
}

/**
 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Return: None
 */
static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
	netif_napi_del(&napii->rx_thread_napi);
}
#else /* RECEIVE_OFFLOAD */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
}

static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
}
#endif

/**
 * hif_napi_create() - creates the NAPI structures for a given HIF context
 * @hif_ctx: pointer to hif context
 * @poll   : poll function to be used for this NAPI instance
 * @budget : budget to be registered with the NAPI instance
 * @scale  : scale factor on the weight (to scale the budget to 1000)
 * @flags  : feature flags
 *
 * Description:
 * Creates NAPI instances. This function is called
 * unconditionally during initialization. It creates
 * napi structures through the proper HTC/HIF calls.
 * The structures are disabled on creation.
 * Note that for each NAPI instance a separate dummy netdev is used.
 *
 * Return:
 * < 0: error
 * = 0: <should never happen>
 * > 0: id of the created object (for multi-NAPI, number of objects created)
 */
int hif_napi_create(struct hif_opaque_softc *hif_ctx,
		    int (*poll)(struct napi_struct *, int),
		    int budget,
		    int scale,
		    uint8_t flags)
{
	int i;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct CE_state *ce_state;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	int rc = 0;

	NAPI_DEBUG("-->(budget=%d, scale=%d)",
		   budget, scale);
	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
		   hif->napi_data.state);
	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
		   hif->napi_data.ce_map);

	napid = &(hif->napi_data);
	if (0 == (napid->state & HIF_NAPI_INITED)) {
		memset(napid, 0, sizeof(struct qca_napi_data));
		qdf_spinlock_create(&(napid->lock));

		napid->state |= HIF_NAPI_INITED;
		napid->flags = flags;

		rc = hif_napi_cpu_init(hif_ctx);
		if (rc != 0 && rc != -EALREADY) {
			HIF_ERROR("NAPI initialization failed: %d", rc);
			rc = napid->ce_map;
			goto hnc_err;
		} else
			rc = 0;

		HIF_DBG("%s: NAPI structures initialized, rc=%d",
			__func__, rc);
	}
	for (i = 0; i < hif->ce_count; i++) {
		ce_state = hif->ce_id_to_state[i];
		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
			   i, ce_state->htt_rx_data,
			   ce_state->htt_tx_data);
		if (ce_srng_based(hif))
			continue;

		if (!ce_state->htt_rx_data)
			continue;

		/* Now this is a CE where we need NAPI on */
		NAPI_DEBUG("Creating NAPI on pipe %d", i);
		napii = qdf_mem_malloc(sizeof(*napii));
		napid->napis[i] = napii;
		if (!napii) {
			NAPI_DEBUG("NAPI alloc failure %d", i);
			rc = -ENOMEM;
			goto napii_free;
		}
	}

	for (i = 0; i < hif->ce_count; i++) {
		napii = napid->napis[i];
		if (!napii)
			continue;

		NAPI_DEBUG("initializing NAPI for pipe %d", i);
		memset(napii, 0, sizeof(struct qca_napi_info));
		napii->scale = scale;
		napii->id = NAPI_PIPE2ID(i);
		napii->hif_ctx = hif_ctx;
		napii->irq = pld_get_irq(hif->qdf_dev->dev, i);

		if (napii->irq < 0)
			HIF_WARN("%s: bad IRQ value for CE %d: %d",
				 __func__, i, napii->irq);

		init_dummy_netdev(&(napii->netdev));

		NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
			   &(napii->napi), &(napii->netdev), poll, budget);
		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);

		NAPI_DEBUG("after napi_add");
		NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
			   &(napii->napi), &(napii->netdev));
		NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
			   napii->napi.dev_list.prev,
			   napii->napi.dev_list.next);
		NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
			   napii->netdev.napi_list.prev,
			   napii->netdev.napi_list.next);

		hif_init_rx_thread_napi(napii);
		napii->lro_ctx = qdf_lro_init();
		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
			   i, napii->id, napii->lro_ctx);

		/* It is OK to change the state variable below without
		 * protection as there should be no-one around yet
		 */
		napid->ce_map |= (0x01 << i);
		HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
			napii->id, i);
	}

	/* no CEs registered with the napi */
	if (!ce_srng_based(hif) && napid->ce_map == 0) {
		HIF_WARN("%s: no napis created for copy engines", __func__);
		rc = -EFAULT;
		goto napii_free;
	}

	NAPI_DEBUG("napi map = %x", napid->ce_map);
	NAPI_DEBUG("NAPI ids created for all applicable pipes");
	return napid->ce_map;

napii_free:
	for (i = 0; i < hif->ce_count; i++) {
		napii = napid->napis[i];
		napid->napis[i] = NULL;
		if (napii)
			qdf_mem_free(napii);
	}

hnc_err:
	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
	return rc;
}
qdf_export_symbol(hif_napi_create);
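
/*
 * Usage sketch (illustrative only; the real call sites live in the upper
 * HDD/HTC layers, and the poll routine name below is hypothetical):
 *
 *	int ce_map = hif_napi_create(hif_ctx, my_napi_poll,
 *				     QCA_NAPI_BUDGET, QCA_NAPI_DEF_SCALE,
 *				     QCA_NAPI_FEATURE_CPU_CORRECTION);
 *	if (ce_map > 0)
 *		... each set bit i corresponds to a NAPI on CE i, with
 *		    napi id NAPI_PIPE2ID(i); pass that id to
 *		    hif_napi_destroy() at teardown ...
 */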

#ifdef RECEIVE_OFFLOAD
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (offld_flush_handler)(void *))
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			napii->offld_flush_cb = offld_flush_handler;
			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
		}
	}
}

void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			HIF_DBG("Deregistering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
			/* Not required */
			napii->offld_flush_cb = NULL;
		}
	}
}
#endif /* RECEIVE_OFFLOAD */

/**
 * hif_napi_destroy() - destroys the NAPI structures for a given instance
 * @hif_ctx: pointer to hif context
 * @id     : the NAPI id whose instance will be destroyed
 * @force  : if set, will destroy even if entry is active (de-activates)
 *
 * Description:
 * Destroys a given NAPI instance. This function is called
 * unconditionally during cleanup.
 * Refuses to destroy an entry if it is still enabled (unless force=1).
 * Marks the whole napi_data invalid if all instances are destroyed.
 *
 * Return:
 * -EINVAL: specific entry has not been created
 * -EPERM : specific entry is still active
 * < 0    : other error
 * = 0    : success
 */
int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
		     uint8_t id,
		     int force)
{
	uint8_t ce = NAPI_ID2PIPE(id);
	int rc = 0;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);

	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
			  __func__, id);
		rc = -EINVAL;
	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
			  __func__, id, ce);
		if (hif->napi_data.napis[ce])
			HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
				  __func__, id, ce);
		rc = -EINVAL;
	} else {
		struct qca_napi_data *napid;
		struct qca_napi_info *napii;

		napid = &(hif->napi_data);
		napii = napid->napis[ce];
		if (!napii) {
			if (napid->ce_map & (0x01 << ce))
				HIF_ERROR("%s: napii & ce_map out of sync (ce %d)",
					  __func__, ce);
			return -EINVAL;
		}

		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
			if (force) {
				napi_disable(&(napii->napi));
				HIF_DBG("%s: NAPI entry %d force disabled",
					__func__, id);
				NAPI_DEBUG("NAPI %d force disabled", id);
			} else {
				HIF_ERROR("%s: Cannot destroy active NAPI %d",
					  __func__, id);
				rc = -EPERM;
			}
		}
		if (0 == rc) {
			NAPI_DEBUG("before napi_del");
			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
				   napii->napi.dev_list.prev,
				   napii->napi.dev_list.next);
			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
				   napii->netdev.napi_list.prev,
				   napii->netdev.napi_list.next);

			qdf_lro_deinit(napii->lro_ctx);
			netif_napi_del(&(napii->napi));
			hif_deinit_rx_thread_napi(napii);

			napid->ce_map &= ~(0x01 << ce);
			napid->napis[ce] = NULL;
			napii->scale = 0;
			qdf_mem_free(napii);
			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);

			/* if all instances have been destroyed, set the
			 * whole structure to the uninitialized state
			 */
			if (napid->ce_map == 0) {
				rc = hif_napi_cpu_deinit(hif_ctx);
				/* caller is tolerant to receiving !=0 rc */

				qdf_spinlock_destroy(&(napid->lock));
				memset(napid,
				       0, sizeof(struct qca_napi_data));
				HIF_DBG("%s: no NAPI instances. Zapped.",
					__func__);
			}
		}
	}

	return rc;
}
qdf_export_symbol(hif_napi_destroy);

#ifdef FEATURE_LRO
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	napid = &(scn->napi_data);
	napii = napid->napis[NAPI_ID2PIPE(napi_id)];

	if (napii)
		return napii->lro_ctx;
	return 0;
}
#endif

/**
 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
 * @hif_ctx: pointer to hif context
 *
 * Description:
 * Returns the address of the whole structure
 *
 * Return:
 * <addr>: address of the whole HIF NAPI structure
 */
inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	return &(hif->napi_data);
}

struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
{
	int id = NAPI_ID2PIPE(napi_id);

	return napid->napis[id];
}

/**
 * hif_napi_event() - reacts to events that impact NAPI
 * @hif_ctx: pointer to hif context
 * @event  : event that has been detected
 * @data   : more data regarding the event
 *
 * Description:
 * This function handles two types of events:
 * 1- Events that change the state of NAPI (enabled/disabled):
 *    {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *    The state is retrievable by "hdd_napi_enabled(-1)"
 *    - NAPI will be on if either the INI file is on and it has not been
 *      disabled by a subsequent vendor CMD, or it has been enabled by a
 *      vendor CMD.
 * 2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *    {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *    - In LO tput mode, NAPI will yield control of its interrupts to the
 *      system management functions. However, in HI throughput mode, NAPI
 *      will actively manage its interrupts/instances (by trying to disperse
 *      them out to separate performance cores).
 *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 *    + In some cases (roaming peer management is the only case so far), a
 *      client can trigger a "SERIALIZE" event. Basically, this means that
 *      the user is asking NAPI to go into a truly single execution context
 *      state. So, NAPI indicates to msm-irqbalancer that it wants to be
 *      blacklisted (if called for the first time) and then moves all IRQs
 *      (for NAPI instances) to be collapsed to a single core. If called
 *      multiple times, it will just re-collapse the CPUs. This is because
 *      the blacklist-on() API is reference-counted, and because the API
 *      has already been called.
 *
 *      Such a user should then call a "DESERIALIZE" (NORMAL) event, to
 *      return NAPI to its "normal" operation. Optionally, they can give a
 *      timeout value (in multiples of BusBandwidthCheckPeriod -- 100 msecs
 *      by default). In this case, NAPI will just set the current throughput
 *      state to uninitialized and set the delay period. Once the policy
 *      handler is called, it will skip applying the policy for that many
 *      delay periods, and apply it otherwise.
 *
 * Return:
 * < 0: some error
 * = 0: event handled successfully
 */
int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	uint32_t prev_state;
	int i;
	bool state_changed;
	struct napi_struct *napi;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	enum {
		BLACKLIST_NOT_PENDING,
		BLACKLIST_ON_PENDING,
		BLACKLIST_OFF_PENDING
	} blacklist_pending = BLACKLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	if (ce_srng_based(hif))
		return hif_exec_event(hif_ctx, event, data);

	if ((napid->state & HIF_NAPI_INITED) == 0) {
		NAPI_DEBUG("%s: got event when NAPI not initialized",
			   __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE: {
		int on = (data != ((void *)0));

		HIF_DBG("%s: received event: STATE_CMD %d; v = %d (state=0x%0x)",
			__func__, event,
			on, prev_state);
		if (on)
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: duplicate NAPI conf ON msg",
					__func__);
			} else {
				HIF_DBG("%s: setting state to ON",
					__func__);
				napid->state |= HIF_NAPI_CONF_UP;
			}
		else /* off request */
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: setting state to OFF",
					__func__);
				napid->state &= ~HIF_NAPI_CONF_UP;
			} else {
				HIF_DBG("%s: duplicate NAPI conf OFF msg",
					__func__);
			}
		break;
	}
	/* case NAPI_INIT_FILE/CMD_STATE */

	case NAPI_EVT_CPU_STATE: {
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_napi_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			blacklist_pending = BLACKLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to the power cluster once
			 * blacklisting has been turned off in the "else" case.
			 */
		} else {
			/* from TPUT_LO -> TPUT_HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			blacklist_pending = BLACKLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		rc = hif_napi_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		if ((users == 0) && (rc == 0))
			blacklist_pending = BLACKLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark current mode to uninitialized to ensure
		 * it will be set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long) data);
		break;
	} /* default */
	}; /* switch */

	switch (blacklist_pending) {
	case BLACKLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
		break;
	case BLACKLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch blacklist_pending */

	/* we want to perform the comparison under the lock:
	 * there is a possibility of hif_napi_event getting called
	 * from two different contexts (driver unload and cpu hotplug
	 * notification), and napid->state getting changed in the driver
	 * unload context can lead to a race condition in the cpu hotplug
	 * context. Therefore, perform the napid->state comparison before
	 * releasing the lock.
	 */
	state_changed = (prev_state != napid->state);
	qdf_spin_unlock_bh(&(napid->lock));

	if (state_changed) {
		if (napid->state == ENABLE_NAPI_MASK) {
			rc = 1;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: enabling NAPI %d",
						   __func__, i);
					napi_enable(napi);
				}
			}
		} else {
			rc = 0;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: disabling NAPI %d",
						   __func__, i);
					napi_disable(napi);
					/* in case it is affined, remove it */
					qdf_dev_set_irq_affinity(napii->irq,
								 NULL);
				}
			}
		}
	} else {
		HIF_DBG("%s: no change in hif napi state (still %d)",
			__func__, prev_state);
	}

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
qdf_export_symbol(hif_napi_event);
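
/*
 * Illustrative call sketch (not driver code): the opaque @data argument is
 * event-specific. For NAPI_EVT_CPU_STATE it packs (cpu << 16 | cpu_state),
 * matching the decoding in hif_napi_event() and the encoding done by
 * hnc_cpu_before_offline_cb() below:
 *
 *	hif_napi_event(hif_ctx, NAPI_EVT_CPU_STATE,
 *		       (void *)((size_t)cpu << 16 | QCA_NAPI_CPU_DOWN));
 *
 * For the on/off config events, any non-NULL pointer means "on":
 *
 *	hif_napi_event(hif_ctx, NAPI_EVT_CMD_STATE, (void *)1);
 */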

/**
 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
 * @hif_ctx: hif context
 * @ce     : CE instance (or -1, to check if any CEs are enabled)
 *
 * Return: bool
 */
int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	if (-1 == ce)
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
	else
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
		      (hif->napi_data.ce_map & (0x01 << ce)));
	return rc;
}
qdf_export_symbol(hif_napi_enabled);

/**
 * hif_napi_created() - checks whether NAPI is created for given ce or not
 * @hif_ctx: hif context
 * @ce     : CE instance
 *
 * Return: bool
 */
bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	rc = (hif->napi_data.ce_map & (0x01 << ce));

	return !!rc;
}
qdf_export_symbol(hif_napi_created);

/**
 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
 * @hif: hif context
 * @id : id of NAPI instance calling this (used to determine the CE)
 *
 * Return: void
 */
inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	hif_irq_enable(scn, NAPI_ID2PIPE(id));
}

/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx: hif context
 * @ce_id  : index of napi instance
 *
 * Return: false if napi is not enabled or is already scheduled,
 *	   otherwise true
 */
bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
{
	int cpu = smp_processor_id();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napii;

	napii = scn->napi_data.napis[ce_id];
	if (qdf_unlikely(!napii)) {
		HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
			  __func__, ce_id);
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
		NAPI_DEBUG("napi scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
				 NULL, NULL, 0, 0);
	napii->stats[cpu].napi_schedules++;
	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
	napi_schedule(&(napii->napi));

	return true;
}
qdf_export_symbol(hif_napi_schedule);
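
/*
 * Caller contract sketch (an assumption drawn from the decrements above,
 * not a verbatim call site): the interrupt path accounts for an active
 * tasklet before scheduling, and hif_napi_schedule() releases that
 * reference itself whenever it returns false:
 *
 *	qdf_atomic_inc(&scn->active_tasklet_cnt);
 *	hif_napi_schedule(hif_ctx, ce_id);
 */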

/**
 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
 * @napi_info: pointer to qca_napi_info for the napi instance
 *
 * Return: true  => interrupt already on correct cpu, no correction needed
 *	   false => interrupt on wrong cpu, correction done for cpu affinity
 *		    of the interrupt
 */
static inline
bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
{
	bool right_cpu = true;
	int rc = 0;
	int cpu;
	struct qca_napi_data *napid;
	QDF_STATUS ret;

	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));

	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {

		cpu = qdf_get_cpu();
		if (unlikely((hif_napi_cpu_blacklist(napid,
						     BLACKLIST_QUERY) > 0) &&
			     (cpu != napi_info->cpu))) {
			right_cpu = false;

			NAPI_DEBUG("interrupt on wrong CPU, correcting");
			napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu);

			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
			ret = qdf_dev_set_irq_affinity(napi_info->irq,
						       (struct qdf_cpu_mask *)
						       &napi_info->cpumask);
			rc = qdf_status_to_os_return(ret);
			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);

			if (rc)
				HIF_ERROR("error setting irq affinity hint: %d",
					  rc);
			else
				napi_info->stats[cpu].cpu_corrected++;
		}
	}
	return right_cpu;
}

#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_offld_flush_cb() - Call upper layer flush callback
 * @napi_info: Handle to hif_napi_info
 *
 * Return: None
 */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
	if (napi_info->offld_flush_cb)
		napi_info->offld_flush_cb(napi_info);
}
#else
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
}
#endif

/**
 * hif_napi_poll() - NAPI poll routine
 * @hif_ctx: hif context
 * @napi   : pointer to NAPI struct as kernel holds it
 * @budget : the maximum amount of work the poll is allowed to do
 *
 * This is the body of the poll function.
 * The poll function is called by the kernel. So, there is a wrapper
 * function in HDD, which in turn calls this function.
 * Two main reasons why the whole thing is not implemented in HDD:
 * a) references to things like ce_service that HDD is not aware of
 * b) proximity to the implementation of ce_tasklet, which the body
 *    of this function should be very close to.
 *
 * NOTE TO THE MAINTAINER:
 * Consider this function and ce_tasklet very tightly coupled pairs.
 * Any changes to ce_tasklet or this function will likely need to be
 * reflected in the counterpart.
 *
 * Return:
 *	int: the amount of work done in this poll (<= budget)
 */
int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
		  struct napi_struct *napi,
		  int budget)
{
	int rc = 0; /* default: no work done, also takes care of error */
	int normalized = 0;
	int bucket;
	int cpu = smp_processor_id();
	bool poll_on_right_cpu;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napi_info;
	struct CE_state *ce_state = NULL;

	if (unlikely(NULL == hif)) {
		HIF_ERROR("%s: hif context is NULL", __func__);
		QDF_ASSERT(0);
		goto out;
	}

	napi_info = (struct qca_napi_info *)
		container_of(napi, struct qca_napi_info, napi);

	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
		   __func__, napi_info->id, napi_info->irq, budget);

	napi_info->stats[cpu].napi_polls++;

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);

	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
		   __func__, rc);

	hif_napi_offld_flush_cb(napi_info);

	/* do not return 0, if there was some work done,
	 * even if it is below the scale
	 */
	if (rc) {
		napi_info->stats[cpu].napi_workdone += rc;
		normalized = (rc / napi_info->scale);
		if (normalized == 0)
			normalized++;
		bucket = (normalized - 1) /
			 (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
			bucket = QCA_NAPI_NUM_BUCKETS - 1;
			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d) normalized %d, napi budget %d",
				  bucket, QCA_NAPI_NUM_BUCKETS,
				  normalized, QCA_NAPI_BUDGET);
		}
		napi_info->stats[cpu].napi_budget_uses[bucket]++;
	} else {
		/* if ce_per_engine reports 0, then poll should be terminated */
		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
			   __func__, __LINE__);
	}

	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];

	/*
	 * Not using the API hif_napi_correct_cpu directly in the if statement
	 * below, since the API may not get evaluated if put at the end when
	 * any prior condition evaluates to true. The CPU correction
	 * check should kick in every poll.
	 */
#ifdef NAPI_YIELD_BUDGET_BASED
	if (ce_state && (ce_state->force_break || 0 == rc)) {
#else
	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
	if ((ce_state) &&
	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
	     !poll_on_right_cpu)) {
#endif
		napi_info->stats[cpu].napi_completes++;
#ifdef NAPI_YIELD_BUDGET_BASED
		ce_state->force_break = 0;
#endif

		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
					 NULL, NULL, 0, 0);
		if (normalized >= budget)
			normalized = budget - 1;

		napi_complete(napi);
		/* enable interrupts */
		hif_napi_enable_irq(hif_ctx, napi_info->id);
		/* support suspend/resume */
		qdf_atomic_dec(&(hif->active_tasklet_cnt));

		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
			   __func__, __LINE__);
	} else {
		/* 4.4 kernel NAPI implementation requires drivers to
		 * return full work when they ask to be re-scheduled,
		 * or napi_complete and re-start with a fresh interrupt
		 */
		normalized = budget;
	}

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);

	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
	return normalized;
out:
	return rc;
}
qdf_export_symbol(hif_napi_poll);

void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   int ce_id,
				   int cpu_id)
{
	struct hif_softc *hif;
	struct qca_napi_info *napi_info;
	unsigned long long napi_poll_time = sched_clock() -
					    ce_state->ce_service_start_time;

	hif = ce_state->scn;
	napi_info = hif->napi_data.napis[ce_id];
	if (napi_poll_time >
	    napi_info->stats[cpu_id].napi_max_poll_time)
		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
}
qdf_export_symbol(hif_update_napi_max_poll_time);

#ifdef HIF_IRQ_AFFINITY
/**
 * hif_napi_update_yield_stats() - update NAPI yield related stats
 * @ce_state: CE state pointer for the CE whose yield stats are updated
 * @time_limit_reached: indicates whether the time limit was reached
 * @rxpkt_thresh_reached: indicates whether the rx packet threshold was
 *			  reached
 *
 * Return: None
 */
void hif_napi_update_yield_stats(struct CE_state *ce_state,
				 bool time_limit_reached,
				 bool rxpkt_thresh_reached)
{
	struct hif_softc *hif;
	struct qca_napi_data *napi_data = NULL;
	int ce_id = 0;
	int cpu_id = 0;

	if (unlikely(NULL == ce_state)) {
		QDF_ASSERT(NULL != ce_state);
		return;
	}

	hif = ce_state->scn;

	if (unlikely(NULL == hif)) {
		QDF_ASSERT(NULL != hif);
		return;
	}
	napi_data = &(hif->napi_data);
	if (unlikely(NULL == napi_data)) {
		QDF_ASSERT(NULL != napi_data);
		return;
	}

	ce_id = ce_state->id;
	cpu_id = qdf_get_cpu();

	if (unlikely(!napi_data->napis[ce_id])) {
		HIF_INFO("%s: NAPI info is NULL for ce id: %d",
			 __func__, ce_id);
		return;
	}

	if (time_limit_reached)
		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
	else
		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;

	hif_update_napi_max_poll_time(ce_state, ce_id,
				      cpu_id);
}

/**
 * hif_napi_stats() - display NAPI CPU statistics
 * @napid: pointer to qca_napi_data
 *
 * Description:
 * Prints the state of the various CPU cores on which the NAPI
 * instances/CE interrupts are being executed. Can be called from
 * outside the NAPI layer.
 *
 * Return: None
 */
void hif_napi_stats(struct qca_napi_data *napid)
{
	int i;
	struct qca_napi_cpu *cpu;

	if (napid == NULL) {
		qdf_debug("%s: napid struct is null", __func__);
		return;
	}

	cpu = napid->napi_cpu;
	qdf_debug("NAPI CPU TABLE");
	qdf_debug("lilclhead=%d, bigclhead=%d",
		  napid->lilcl_head, napid->bigcl_head);
	for (i = 0; i < NR_CPUS; i++) {
		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
			  i,
			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
			  cpu[i].core_mask.bits[0],
			  cpu[i].thread_mask.bits[0],
			  cpu[i].max_freq, cpu[i].napis,
			  cpu[i].cluster_nxt);
	}
}

#ifdef FEATURE_NAPI_DEBUG
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	hif_napi_stats(napid);
}
#else
static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
#endif /* FEATURE_NAPI_DEBUG */

/**
 * hnc_link_clusters() - partitions the cpu table into clusters
 * @napid: pointer to NAPI data
 *
 * Takes in a CPU topology table and builds two linked lists
 * (big cluster cores, list-head at bigcl_head, and little cluster
 * cores, list-head at lilcl_head) out of it.
 *
 * If there is more than one cluster:
 * - bigcl_head and lilcl_head will be different,
 * - the cluster with the highest cpufreq will be considered the "big"
 *   cluster. If there is more than one cluster with the highest frequency,
 *   the *last* of such clusters will be designated as the "big cluster".
 * - the cluster with the lowest cpufreq will be considered the "li'l"
 *   cluster. If there is more than one cluster with the lowest cpu freq,
 *   the *first* of such clusters will be designated as the "little cluster".
 * - we only support up to 32 clusters.
 *
 * Return: 0 : OK
 *	   !0: error (at least one of lil/big clusters could not be found)
 */
#define HNC_MIN_CLUSTER 0
#define HNC_MAX_CLUSTER 1
static int hnc_link_clusters(struct qca_napi_data *napid)
{
	int rc = 0;

	int i;
	int it = 0;
	uint32_t cl_done = 0x0;
	int cl, curcl, curclhead = 0;
	int more;
	unsigned int lilfrq = INT_MAX;
	unsigned int bigfrq = 0;
	unsigned int clfrq = 0;
	int prev = 0;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	napid->lilcl_head = napid->bigcl_head = -1;

	do {
		more = 0;
		it++; curcl = -1;
		for (i = 0; i < NR_CPUS; i++) {
			cl = cpus[i].cluster_id;
			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
				   i, cl);
			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
				/* continue if ASSERTs are disabled */
				continue;
			};
			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
				NAPI_DEBUG("Core mask 0. SKIPPED\n");
				continue;
			}
			if (cl_done & (0x01 << cl)) {
				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
				continue;
			} else {
				if (more == 0) {
					more = 1;
					curcl = cl;
					curclhead = i; /* row */
					clfrq = cpus[i].max_freq;
					prev = -1;
				};
				if ((curcl >= 0) && (curcl != cl)) {
					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
						   cl, curcl);
					continue;
				}
				if (cpus[i].max_freq != clfrq)
					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
						   cpus[i].max_freq, clfrq);
				if (clfrq >= bigfrq) {
					bigfrq = clfrq;
					napid->bigcl_head = curclhead;
					NAPI_DEBUG("bigcl=%d\n", curclhead);
				}
				if (clfrq < lilfrq) {
					lilfrq = clfrq;
					napid->lilcl_head = curclhead;
					NAPI_DEBUG("lilcl=%d\n", curclhead);
				}
				if (prev != -1)
					cpus[prev].cluster_nxt = i;

				prev = i;
			}
		}
		if (curcl >= 0)
			cl_done |= (0x01 << curcl);

	} while (more);

	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
		rc = -EFAULT;

	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
	return rc;
}
#undef HNC_MIN_CLUSTER
#undef HNC_MAX_CLUSTER
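
/*
 * Illustrative result (assuming a hypothetical 4+4 big.LITTLE layout with
 * little cores 0-3 in cluster 0 and big cores 4-7 in cluster 1): after
 * hnc_link_clusters(),
 *
 *	napid->lilcl_head == 0, cluster_nxt chaining 0 -> 1 -> 2 -> 3 -> -1
 *	napid->bigcl_head == 4, cluster_nxt chaining 4 -> 5 -> 6 -> 7 -> -1
 */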

/*
 * hotplug function group
 */

/**
 * hnc_cpu_online_cb() - handles CPU hotplug "up" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * Return: None
 */
static void hnc_cpu_online_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
	NAPI_DEBUG("%s: CPU %u marked %d",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * On transition to offline, we act on PREP events, because we may need to
 * move the irqs/NAPIs to another CPU before it is actually off-lined.
 *
 * Return: None
 */
static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;

	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	/* we need to move any NAPIs on this CPU out.
	 * if we are in LO throughput mode, then this is valid
	 * if the CPU is the low designated CPU.
	 */
	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
		       NAPI_EVT_CPU_STATE,
		       (void *)
		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));

	NAPI_DEBUG("<--%s", __func__);
}

static int hnc_hotplug_register(struct hif_softc *hif_sc)
{
	QDF_STATUS status;

	NAPI_DEBUG("-->%s", __func__);

	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
				    hif_sc,
				    hnc_cpu_online_cb,
				    hnc_cpu_before_offline_cb);

	NAPI_DEBUG("<--%s [%d]", __func__, status);

	return qdf_status_to_os_return(status);
}

static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
{
	NAPI_DEBUG("-->%s", __func__);

	if (hif_sc->napi_data.cpuhp_handler)
		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_tput_hook() - installs a callback in the throughput detector
 * @install: !0 => install; =0: uninstall
 *
 * Installs a callback to be called when wifi driver throughput (tx+rx)
 * crosses a threshold. Currently, we are using the same criteria as
 * TCP ack suppression (500 packets/100ms by default).
 *
 * Return: 0 : success
 *	   <0: failure
 */
static int hnc_tput_hook(int install)
{
	int rc = 0;

	/*
	 * Nothing to do until the bw_calculation accepts registration;
	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
	 * hdd_napi_throughput_policy(...)
	 */
	return rc;
}

/*
 * Implementation of hif_napi_cpu API
 */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
	cpumask_copy(&(cpus[i].thread_mask),
		     topology_sibling_cpumask(i));
}
#else
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
}
#endif

/**
 * hif_napi_cpu_init() - initialization of irq affinity block
 * @hif: pointer to hif context
 *
 * Called by hif_napi_create, when the first instance is created:
 * - builds the napi_rss_cpus table from cpu topology
 * - links cores of the same clusters together
 * - installs hot-plug notifier
 * - installs throughput trigger notifier (when such mechanism exists)
 *
 * Return: 0: OK
 *	   <0: error code
 */
int hif_napi_cpu_init(struct hif_opaque_softc *hif)
{
	int rc = 0;
	int i;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	NAPI_DEBUG("--> ");

	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
		NAPI_DEBUG("NAPI RSS table already initialized.\n");
		rc = -EALREADY;
		goto lab_rss_init;
	}

	/* build CPU topology table */
	for_each_possible_cpu(i) {
		cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
				  ? QCA_NAPI_CPU_UP
				  : QCA_NAPI_CPU_DOWN));
		cpus[i].core_id = topology_core_id(i);
		cpus[i].cluster_id = topology_physical_package_id(i);
		cpumask_copy(&(cpus[i].core_mask),
			     topology_core_cpumask(i));
		record_sibling_cpumask(cpus, i);
		cpus[i].max_freq = cpufreq_quick_get_max(i);
		cpus[i].napis = 0x0;
		cpus[i].cluster_nxt = -1; /* invalid */
	}

	/* link clusters together */
	rc = hnc_link_clusters(napid);
	if (0 != rc)
		goto lab_err_topology;

	/* install hotplug notifier */
	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
	if (0 != rc)
		goto lab_err_hotplug;

	/* install throughput notifier */
	rc = hnc_tput_hook(1);
	if (0 == rc)
		goto lab_rss_init;

lab_err_hotplug:
	hnc_tput_hook(0);
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
lab_err_topology:
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init:
	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hif_napi_cpu_deinit() - clean-up of irq affinity block
 * @hif: pointer to hif context
 *
 * Called by hif_napi_destroy, when the last instance is removed:
 * - uninstalls throughput and hotplug notifiers
 * - clears the cpu topology table
 *
 * Return: 0: OK
 */
int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
{
	int rc = 0;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;

	NAPI_DEBUG("-->%s(...)", __func__);

	/* uninstall tput notifier */
	rc = hnc_tput_hook(0);

	/* uninstall hotplug notifier */
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));

	/* clear the topology table */
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);

	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);

	return rc;
}

/**
 * hncm_migrate_to() - migrates a NAPI to a CPU
 * @napid  : pointer to NAPI block
 * @napi_ce: CE_id of the NAPI instance
 * @didx   : index in the CPU topology table for the CPU to migrate to
 *
 * Migrates NAPI (identified by the CE_id) to the destination core.
 * Updates the napi_map of the destination entry.
 *
 * Return:
 *	=0 : success
 *	<0 : error
 */
static int hncm_migrate_to(struct qca_napi_data *napid,
			   int napi_ce,
			   int didx)
{
	int rc = 0;
	QDF_STATUS status;

	NAPI_DEBUG("-->%s(napi_ce=%d, didx=%d)", __func__, napi_ce, didx);

	if (!napid->napis[napi_ce])
		return -EINVAL;

	napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx);

	irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
	status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq,
					  (struct qdf_cpu_mask *)
					  &napid->napis[napi_ce]->cpumask);
	rc = qdf_status_to_os_return(status);

	/* unmark the napis bitmap in the cpu table */
	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
	/* mark the napis bitmap for the new designated cpu */
	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
	napid->napis[napi_ce]->cpu = didx;

	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}
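
/*
 * Worked example (hypothetical values): hncm_migrate_to(napid, 5, 4) for a
 * NAPI that currently lives on cpu 1 clears bit 5 in napi_cpu[1].napis,
 * sets bit 5 in napi_cpu[4].napis, points the IRQ affinity mask of
 * napis[5]->irq at cpu 4 only, and records napis[5]->cpu = 4.
 */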

/**
 * hncm_dest_cpu() - finds a destination CPU for NAPI
 * @napid: pointer to NAPI block
 * @act  : RELOCATE | COLLAPSE | DISPERSE
 *
 * Finds the designated destination for the next IRQ.
 * RELOCATE: translated to either COLLAPSE or DISPERSE based
 *	     on napid->napi_mode (throughput state)
 * COLLAPSE: All have the same destination: the first online CPU in lilcl
 * DISPERSE: One of the CPUs in bigcl, which has the smallest number of
 *	     NAPIs on it
 *
 * Return: >=0 : index in the cpu topology table
 *	   < 0 : error
 */
static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
{
	int destidx = -1;
	int head, i;

	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
	if (act == HNC_ACT_RELOCATE) {
		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
			act = HNC_ACT_COLLAPSE;
		else
			act = HNC_ACT_DISPERSE;
		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
			   __func__, act);
	}
	if (act == HNC_ACT_COLLAPSE) {
		head = i = napid->lilcl_head;
retry_collapse:
		while (i >= 0) {
			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
				destidx = i;
				break;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		if ((destidx < 0) && (head == napid->lilcl_head)) {
			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
				   __func__);
			head = i = napid->bigcl_head;
			goto retry_collapse;
		}
	} else { /* HNC_ACT_DISPERSE */
		int smallest = 99; /* > max hweight32() result, i.e. 32 */
		int smallidx = -1;

		head = i = napid->bigcl_head;
retry_disperse:
		while (i >= 0) {
			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
				/* store the NAPI count, not the raw bitmap */
				smallest = hweight32(napid->napi_cpu[i].napis);
				smallidx = i;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		/* check if it matches the user-specified CPU mask;
		 * guard against shifting by -1 when nothing was found
		 */
		if (smallidx >= 0)
			smallidx = ((1 << smallidx) &
				    napid->user_cpu_affin_mask) ?
				   smallidx : -1;

		if ((smallidx < 0) && (head == napid->bigcl_head)) {
			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
				   __func__);
			head = i = napid->lilcl_head;
			goto retry_disperse;
		}
		destidx = smallidx;
	}
	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
	return destidx;
}
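
/*
 * Selection sketch for HNC_ACT_DISPERSE (hypothetical state): with the
 * bigcl chain 4 -> 5 -> 6 -> 7 and per-CPU NAPI counts {4:2, 5:1, 6:1, 7:3},
 * the walk above keeps the *last* CPU with the smallest load (the <= in the
 * comparison), so CPU 6 is picked, provided its bit is also set in
 * napid->user_cpu_affin_mask.
 */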

/**
 * hif_napi_cpu_migrate() - migrate IRQs away
 * @napid : pointer to NAPI block
 * @cpu   : -1: all CPUs, <n>: specific CPU
 * @action: COLLAPSE | DISPERSE
 *
 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
 * cores. Eligible cores are:
 * action=COLLAPSE -> the first online core of the little cluster
 * action=DISPERSE -> separate cores of the big cluster, so that each core
 *		      will host a minimum number of NAPIs/IRQs
 *		      (napid->cpus[cpu].napis)
 *
 * Note that this function is called with a spinlock acquired already.
 *
 * Return: =0: success
 *	   <0: error
 */
int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
{
	int rc = 0;
	struct qca_napi_cpu *cpup;
	int i, dind;
	uint32_t napis;

	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
		   __func__, cpu, action);
	/* the following is really: hif_napi_enabled() with less overhead */
	if (napid->ce_map == 0) {
		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
		goto hncm_return;
	}

	cpup = napid->napi_cpu;

	switch (action) {
	case HNC_ACT_RELOCATE:
	case HNC_ACT_DISPERSE:
	case HNC_ACT_COLLAPSE: {
		/* first find the src napi set */
		if (cpu == HNC_ANY_CPU)
			napis = napid->ce_map;
		else
			napis = cpup[cpu].napis;
		/* then clear the napi bitmap on each CPU */
		for (i = 0; i < NR_CPUS; i++)
			cpup[i].napis = 0;
		/* then for each of the NAPIs to disperse: */
		for (i = 0; i < CE_COUNT_MAX; i++)
			if (napis & (1 << i)) {
				/* find a destination CPU */
				dind = hncm_dest_cpu(napid, action);
				if (dind >= 0) {
					NAPI_DEBUG("Migrating NAPI ce%d to %d",
						   i, dind);
					rc = hncm_migrate_to(napid, i, dind);
				} else {
					NAPI_DEBUG("No dest for NAPI ce%d", i);
					hnc_dump_cpus(napid);
					rc = -1;
				}
			}
		break;
	}
	default: {
		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
		QDF_BUG(0);
		break;
	}
	} /* switch action */

hncm_return:
	hnc_dump_cpus(napid);
	return rc;
}

/**
 * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
 * @napid  : pointer to qca_napi_data structure
 * @bl_flag: blacklist flag to enable/disable blacklisting
 *
 * The function enables/disables blacklisting for all the copy engine
 * interrupts on which NAPI is enabled.
 *
 * Return: None
 */
static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
{
	int i;
	struct qca_napi_info *napii;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		/* check if NAPI is enabled on the CE */
		if (!(napid->ce_map & (0x01 << i)))
			continue;

		/* double check that NAPI is allocated for the CE */
		napii = napid->napis[i];
		if (!(napii))
			continue;

		if (bl_flag == true)
			irq_modify_status(napii->irq,
					  0, IRQ_NO_BALANCING);
		else
			irq_modify_status(napii->irq,
					  IRQ_NO_BALANCING, 0);
		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
	}
}

#ifdef CONFIG_SCHED_CORE_CTL
/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return core_ctl_set_boost(boost);
}
#else
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif
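
/*
 * Reference-count sketch for the blacklist API below (values illustrative):
 *
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);  refcnt 0->1: boost+bl on
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_ON);  refcnt 1->2: no-op
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); refcnt 2->1: no-op
 *	hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); refcnt 1->0: boost+bl off
 */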

/**
 * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts
 * @napid: pointer to qca_napi_data structure
 * @op   : blacklist operation to perform
 *
 * The function enables/disables/queries blacklisting for all CE RX
 * interrupts with NAPI enabled. Besides blacklisting, it also
 * enables/disables core_ctl_set_boost.
 * Once blacklisting is enabled, the interrupts will not be managed by the
 * IRQ balancer.
 *
 * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST are not
 *	   enabled
 *	   for BLACKLIST_QUERY op - the blacklist refcount
 *	   for BLACKLIST_ON op    - return value from core_ctl_set_boost API
 *	   for BLACKLIST_OFF op   - return value from core_ctl_set_boost API
 */
int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
			   enum qca_blacklist_op op)
{
	int rc = 0;
	static int ref_count; /* = 0 by the compiler */
	uint8_t flags = napid->flags;
	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;

	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);

	if (!(bl_en && ccb_en)) {
		rc = -EINVAL;
		goto out;
	}

	switch (op) {
	case BLACKLIST_QUERY:
		rc = ref_count;
		break;
	case BLACKLIST_ON:
		ref_count++;
		rc = 0;
		if (ref_count == 1) {
			rc = hif_napi_core_ctl_set_boost(true);
			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
				   rc, ref_count);
			hif_napi_bl_irq(napid, true);
		}
		break;
	case BLACKLIST_OFF:
		if (ref_count) {
			ref_count--;
			rc = 0;
			if (ref_count == 0) {
				rc = hif_napi_core_ctl_set_boost(false);
				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
					   rc, ref_count);
				hif_napi_bl_irq(napid, false);
			}
		}
		break;
	default:
		NAPI_DEBUG("Invalid blacklist op: %d", op);
		rc = -EINVAL;
	} /* switch */
out:
	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}

/**
 * hif_napi_serialize() - [de-]serialize NAPI operations
 * @hif  : pointer to hif context
 * @is_on: 1: serialize, 0: deserialize
 *
 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform
 * the following steps (see hif_napi_event for code):
 * - put the irqs of all NAPI instances on the same CPU
 * - only for the first serialize call: blacklist
 *
 * hif_napi_serialize(hif, 0):
 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
 * - at the end of the timer, check the current throughput state and
 *   implement it.
 *
 * Return: the result of hif_napi_event(), or -EINVAL if @hif is NULL or
 *	   @is_on is out of range
 */
static unsigned long napi_serialize_reqs;
int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
{
	int rc = -EINVAL;

	if (hif != NULL)
		switch (is_on) {
		case 0: { /* de-serialize */
			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
					    (void *) 0);
			napi_serialize_reqs = 0;
			break;
		} /* end de-serialize */
		case 1: { /* serialize */
			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
					    (void *)napi_serialize_reqs++);
			break;
		} /* end serialize */
		default:
			break; /* no-op */
		} /* switch */
	return rc;
}

#endif /* ifdef HIF_IRQ_AFFINITY */
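
/*
 * Usage sketch (illustrative; per the hif_napi_event() DOC above, roaming
 * peer management is the only serialize user so far):
 *
 *	hif_napi_serialize(hif_ctx, 1);	collapse NAPI IRQs + blacklist
 *	... single-execution-context critical work ...
 *	hif_napi_serialize(hif_ctx, 0);	return to normal tput policy
 */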