/*
 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: hif_napi.c
 *
 * HIF NAPI interface implementation
 */

#include <linux/string.h> /* memset */

/* Linux headers */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_SCHED_CORE_CTL
#include <linux/sched/core_ctl.h>
#endif
#include <pld_common.h>
#include <linux/pm.h>

/* Driver headers */
#include <hif_napi.h>
#include <hif_debug.h>
#include <hif_io32.h>
#include <ce_api.h>
#include <ce_internal.h>
#include <hif_irq_affinity.h>
#include "qdf_cpuhp.h"
#include "qdf_module.h"
#include "qdf_net_if.h"
#include "qdf_dev.h"

enum napi_decision_vector {
	HIF_NAPI_NOEVENT = 0,
	HIF_NAPI_INITED  = 1,
	HIF_NAPI_CONF_UP = 2
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
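
/*
 * Illustration (editor's sketch, not driver logic): the decision vector is
 * a two-bit gate, and NAPI is operational only when both bits are set:
 *
 *	state = HIF_NAPI_INITED;		// 0x1: created, still off
 *	state |= HIF_NAPI_CONF_UP;		// 0x3: configured up
 *	enabled = (state == ENABLE_NAPI_MASK);	// true only for 0x3
 *
 * This is exactly the comparison performed by hif_napi_enabled() and by
 * the state-change handling in hif_napi_event() below.
 */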
66 */ 67 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget) 68 { 69 HIF_ERROR("This napi_poll should not be polled as we don't schedule it"); 70 QDF_ASSERT(0); 71 return 0; 72 } 73 74 /** 75 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI 76 * @napii: Handle to napi_info holding rx_thread napi 77 * 78 * Return: None 79 */ 80 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 81 { 82 init_dummy_netdev(&napii->rx_thread_netdev); 83 netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi, 84 hif_rxthread_napi_poll, 64); 85 napi_enable(&napii->rx_thread_napi); 86 } 87 88 /** 89 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI 90 * @napii: Handle to napi_info holding rx_thread napi 91 * 92 * Return: None 93 */ 94 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 95 { 96 netif_napi_del(&napii->rx_thread_napi); 97 } 98 #else /* RECEIVE_OFFLOAD */ 99 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 100 { 101 } 102 103 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 104 { 105 } 106 #endif 107 108 /** 109 * hif_napi_create() - creates the NAPI structures for a given CE 110 * @hif : pointer to hif context 111 * @pipe_id: the CE id on which the instance will be created 112 * @poll : poll function to be used for this NAPI instance 113 * @budget : budget to be registered with the NAPI instance 114 * @scale : scale factor on the weight (to scaler budget to 1000) 115 * @flags : feature flags 116 * 117 * Description: 118 * Creates NAPI instances. This function is called 119 * unconditionally during initialization. It creates 120 * napi structures through the proper HTC/HIF calls. 121 * The structures are disabled on creation. 122 * Note that for each NAPI instance a separate dummy netdev is used 123 * 124 * Return: 125 * < 0: error 126 * = 0: <should never happen> 127 * > 0: id of the created object (for multi-NAPI, number of objects created) 128 */ 129 int hif_napi_create(struct hif_opaque_softc *hif_ctx, 130 int (*poll)(struct napi_struct *, int), 131 int budget, 132 int scale, 133 uint8_t flags) 134 { 135 int i; 136 struct qca_napi_data *napid; 137 struct qca_napi_info *napii; 138 struct CE_state *ce_state; 139 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 140 int rc = 0; 141 142 NAPI_DEBUG("-->(budget=%d, scale=%d)", 143 budget, scale); 144 NAPI_DEBUG("hif->napi_data.state = 0x%08x", 145 hif->napi_data.state); 146 NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x", 147 hif->napi_data.ce_map); 148 149 napid = &(hif->napi_data); 150 if (0 == (napid->state & HIF_NAPI_INITED)) { 151 memset(napid, 0, sizeof(struct qca_napi_data)); 152 qdf_spinlock_create(&(napid->lock)); 153 154 napid->state |= HIF_NAPI_INITED; 155 napid->flags = flags; 156 157 rc = hif_napi_cpu_init(hif_ctx); 158 if (rc != 0 && rc != -EALREADY) { 159 HIF_ERROR("NAPI_initialization failed,. 
%d", rc); 160 rc = napid->ce_map; 161 goto hnc_err; 162 } else 163 rc = 0; 164 165 HIF_DBG("%s: NAPI structures initialized, rc=%d", 166 __func__, rc); 167 } 168 for (i = 0; i < hif->ce_count; i++) { 169 ce_state = hif->ce_id_to_state[i]; 170 NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d", 171 i, ce_state->htt_rx_data, 172 ce_state->htt_tx_data); 173 if (ce_srng_based(hif)) 174 continue; 175 176 if (!ce_state->htt_rx_data) 177 continue; 178 179 /* Now this is a CE where we need NAPI on */ 180 NAPI_DEBUG("Creating NAPI on pipe %d", i); 181 napii = qdf_mem_malloc(sizeof(*napii)); 182 napid->napis[i] = napii; 183 if (!napii) { 184 rc = -ENOMEM; 185 goto napii_free; 186 } 187 } 188 189 for (i = 0; i < hif->ce_count; i++) { 190 napii = napid->napis[i]; 191 if (!napii) 192 continue; 193 194 NAPI_DEBUG("initializing NAPI for pipe %d", i); 195 memset(napii, 0, sizeof(struct qca_napi_info)); 196 napii->scale = scale; 197 napii->id = NAPI_PIPE2ID(i); 198 napii->hif_ctx = hif_ctx; 199 napii->irq = pld_get_irq(hif->qdf_dev->dev, i); 200 201 if (napii->irq < 0) 202 HIF_WARN("%s: bad IRQ value for CE %d: %d", 203 __func__, i, napii->irq); 204 205 init_dummy_netdev(&(napii->netdev)); 206 207 NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)", 208 &(napii->napi), &(napii->netdev), poll, budget); 209 netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget); 210 211 NAPI_DEBUG("after napi_add"); 212 NAPI_DEBUG("napi=0x%pK, netdev=0x%pK", 213 &(napii->napi), &(napii->netdev)); 214 NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK", 215 napii->napi.dev_list.prev, 216 napii->napi.dev_list.next); 217 NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK", 218 napii->netdev.napi_list.prev, 219 napii->netdev.napi_list.next); 220 221 hif_init_rx_thread_napi(napii); 222 napii->lro_ctx = qdf_lro_init(); 223 NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n", 224 i, napii->id, napii->lro_ctx); 225 226 /* It is OK to change the state variable below without 227 * protection as there should be no-one around yet 228 */ 229 napid->ce_map |= (0x01 << i); 230 HIF_DBG("%s: NAPI id %d created for pipe %d", __func__, 231 napii->id, i); 232 } 233 234 /* no ces registered with the napi */ 235 if (!ce_srng_based(hif) && napid->ce_map == 0) { 236 HIF_WARN("%s: no napis created for copy engines", __func__); 237 rc = -EFAULT; 238 goto napii_free; 239 } 240 241 NAPI_DEBUG("napi map = %x", napid->ce_map); 242 NAPI_DEBUG("NAPI ids created for all applicable pipes"); 243 return napid->ce_map; 244 245 napii_free: 246 for (i = 0; i < hif->ce_count; i++) { 247 napii = napid->napis[i]; 248 napid->napis[i] = NULL; 249 if (napii) 250 qdf_mem_free(napii); 251 } 252 253 hnc_err: 254 NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map); 255 return rc; 256 } 257 qdf_export_symbol(hif_napi_create); 258 259 #ifdef RECEIVE_OFFLOAD 260 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, 261 void (offld_flush_handler)(void *)) 262 { 263 int i; 264 struct CE_state *ce_state; 265 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 266 struct qca_napi_data *napid; 267 struct qca_napi_info *napii; 268 269 if (!scn) { 270 HIF_ERROR("%s: hif_state NULL!", __func__); 271 QDF_ASSERT(0); 272 return; 273 } 274 275 napid = hif_napi_get_all(hif_hdl); 276 for (i = 0; i < scn->ce_count; i++) { 277 ce_state = scn->ce_id_to_state[i]; 278 if (ce_state && (ce_state->htt_rx_data)) { 279 napii = napid->napis[i]; 280 napii->offld_flush_cb = offld_flush_handler; 281 HIF_DBG("Registering offload for ce_id %d NAPI callback 
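
/*
 * Usage sketch (hypothetical HDD-side caller, for illustration only;
 * hdd_napi_poll is an assumed poll function name):
 *
 *	int map = hif_napi_create(hif_ctx, hdd_napi_poll,
 *				  QCA_NAPI_BUDGET, QCA_NAPI_DEF_SCALE,
 *				  QCA_NAPI_FEATURE_CPU_CORRECTION);
 *	if (map <= 0)
 *		return -ENODEV;	// no NAPI instance was created
 *
 * The positive return value is the ce_map bitmap: bit i set means a NAPI
 * instance now exists for copy engine i, with id NAPI_PIPE2ID(i).
 */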

#ifdef RECEIVE_OFFLOAD
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (offld_flush_handler)(void *))
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			napii->offld_flush_cb = offld_flush_handler;
			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
		}
	}
}

void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			HIF_DBG("Deregistering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
			napii->offld_flush_cb = NULL;
		}
	}
}
#endif /* RECEIVE_OFFLOAD */

/**
 * hif_napi_destroy() - destroys the NAPI structures for a given instance
 * @hif_ctx: pointer to hif context
 * @id: the NAPI id whose instance will be destroyed
 * @force: if set, will destroy even if the entry is active (de-activates it)
 *
 * Description:
 *    Destroys a given NAPI instance. This function is called
 *    unconditionally during cleanup.
 *    Refuses to destroy an entry if it is still enabled (unless force=1).
 *    Marks the whole napi_data invalid if all instances are destroyed.
 *
 * Return: -EINVAL: specific entry has not been created
 *	   -EPERM : specific entry is still active
 *	   < 0    : other error
 *	   = 0    : success
 */
int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
		     uint8_t id,
		     int force)
{
	uint8_t ce = NAPI_ID2PIPE(id);
	int rc = 0;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);

	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
			  __func__, id);
		rc = -EINVAL;
	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
			  __func__, id, ce);
		if (hif->napi_data.napis[ce])
			HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)",
				  __func__, id, ce);
		rc = -EINVAL;
	} else {
		struct qca_napi_data *napid;
		struct qca_napi_info *napii;

		napid = &(hif->napi_data);
		napii = napid->napis[ce];
		if (!napii) {
			if (napid->ce_map & (0x01 << ce))
				HIF_ERROR("%s: napii & ce_map out of sync (ce %d)",
					  __func__, ce);
			return -EINVAL;
		}

		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
			if (force) {
				napi_disable(&(napii->napi));
				HIF_DBG("%s: NAPI entry %d force disabled",
					__func__, id);
				NAPI_DEBUG("NAPI %d force disabled", id);
			} else {
				HIF_ERROR("%s: Cannot destroy active NAPI %d",
					  __func__, id);
				rc = -EPERM;
			}
		}
		if (0 == rc) {
			NAPI_DEBUG("before napi_del");
			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
				   napii->napi.dev_list.prev,
				   napii->napi.dev_list.next);
			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
				   napii->netdev.napi_list.prev,
				   napii->netdev.napi_list.next);

			qdf_lro_deinit(napii->lro_ctx);
			netif_napi_del(&(napii->napi));
			hif_deinit_rx_thread_napi(napii);

			napid->ce_map &= ~(0x01 << ce);
			napid->napis[ce] = NULL;
			napii->scale = 0;
			qdf_mem_free(napii);
			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);

			/* if all instances are destroyed,
			 * set the whole structure to uninitialized state
			 */
			if (napid->ce_map == 0) {
				rc = hif_napi_cpu_deinit(hif_ctx);
				/* caller is tolerant to receiving !=0 rc */

				qdf_spinlock_destroy(&(napid->lock));
				memset(napid,
				       0, sizeof(struct qca_napi_data));
				HIF_DBG("%s: no NAPI instances. Zapped.",
					__func__);
			}
		}
	}

	return rc;
}
qdf_export_symbol(hif_napi_destroy);

#ifdef FEATURE_LRO
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	napid = &(scn->napi_data);
	napii = napid->napis[NAPI_ID2PIPE(napi_id)];

	if (napii)
		return napii->lro_ctx;
	return NULL;
}
#endif

/**
 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
 * @hif_ctx: pointer to hif context
 *
 * Description:
 *    Returns the address of the whole structure
 *
 * Return:
 *  <addr>: address of the whole HIF NAPI structure
 */
inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	return &(hif->napi_data);
}

struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
{
	int id = NAPI_ID2PIPE(napi_id);

	return napid->napis[id];
}
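
/*
 * Illustration (editor's sketch): NAPI ids and CE pipe numbers are related
 * through NAPI_ID2PIPE()/NAPI_PIPE2ID(), so looking an instance up by NAPI
 * id reduces to an array access:
 *
 *	struct qca_napi_data *napid = hif_napi_get_all(hif_ctx);
 *	struct qca_napi_info *napii = hif_get_napi(napi_id, napid);
 *
 *	if (napii)
 *		NAPI_DEBUG("napi %d: pipe %d, irq %d",
 *			   napii->id, NAPI_ID2PIPE(napi_id), napii->irq);
 */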

/**
 * hif_napi_event() - reacts to events that impact NAPI
 * @hif_ctx: pointer to hif context
 * @event: event that has been detected
 * @data: more data regarding the event
 *
 * Description:
 *   This function handles two types of events:
 *   1- Events that change the state of NAPI (enabled/disabled):
 *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *      The state is retrievable by "hdd_napi_enabled(-1)"
 *    - NAPI will be on if either the INI file is on and it has not been
 *      disabled by a subsequent vendor CMD, or it has been enabled by a
 *      vendor CMD.
 *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *    - In LO tput mode, NAPI will yield control of its interrupts to the
 *      system management functions. However, in HI throughput mode, NAPI
 *      will actively manage its interrupts/instances (by trying to disperse
 *      them out to separate performance cores).
 *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 *    + In some cases (roaming peer management is the only case so far), a
 *      client can trigger a "SERIALIZE" event. Basically, this means that
 *      the user is asking NAPI to go into a truly single execution context
 *      state. So, NAPI indicates to msm-irqbalancer that it wants to be
 *      blacklisted (if called for the first time), and then moves all IRQs
 *      (for NAPI instances) to be collapsed to a single core. If called
 *      multiple times, it will just re-collapse the CPUs: the blacklist-on()
 *      API is reference-counted, so it is not invoked again.
 *
 *      Such a user should call the "DESERIALIZE" (NORMAL) event, to set
 *      NAPI back to its "normal" operation. Optionally, they can give a
 *      timeout value (in multiples of BusBandwidthCheckPeriod -- 100 msecs
 *      by default). In this case, NAPI will just set the current throughput
 *      state to uninitialized and set the delay period. Once the policy
 *      handler is called, it will skip applying the policy for that many
 *      periods, and apply it normally afterwards.
 *
 * Return:
 *  < 0: some error
 *  = 0: event handled successfully
 */
int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	uint32_t prev_state;
	int i;
	bool state_changed;
	struct napi_struct *napi;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	enum {
		BLACKLIST_NOT_PENDING,
		BLACKLIST_ON_PENDING,
		BLACKLIST_OFF_PENDING
	} blacklist_pending = BLACKLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	if (ce_srng_based(hif))
		return hif_exec_event(hif_ctx, event, data);

	if ((napid->state & HIF_NAPI_INITED) == 0) {
		NAPI_DEBUG("%s: got event when NAPI not initialized",
			   __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE: {
		int on = (data != ((void *)0));

		HIF_DBG("%s: received evnt: STATE_CMD %d; v = %d (state=0x%0x)",
			__func__, event,
			on, prev_state);
		if (on) {
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: duplicate NAPI conf ON msg",
					__func__);
			} else {
				HIF_DBG("%s: setting state to ON",
					__func__);
				napid->state |= HIF_NAPI_CONF_UP;
			}
		} else { /* off request */
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_DBG("%s: setting state to OFF",
					__func__);
				napid->state &= ~HIF_NAPI_CONF_UP;
			} else {
				HIF_DBG("%s: duplicate NAPI conf OFF msg",
					__func__);
			}
		}
		break;
	}
	/* case NAPI_EVT_INI_FILE/CMD_STATE/INT_STATE */

	case NAPI_EVT_CPU_STATE: {
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_napi_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			blacklist_pending = BLACKLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to the power cluster once
			 * blacklisting has been turned off in the "else"
			 * case.
			 */
		} else {
			/* from TPUT_LO -> TPUT_HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			blacklist_pending = BLACKLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		rc = hif_napi_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		if ((users == 0) && (rc == 0))
			blacklist_pending = BLACKLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		if (!napid->user_cpu_affin_mask)
			blacklist_pending = BLACKLIST_OFF_PENDING;
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark current mode to uninitialized to ensure
		 * it will be set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long) data);
		break;
	} /* default */
	} /* switch */

	switch (blacklist_pending) {
	case BLACKLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
		break;
	case BLACKLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch blacklist_pending */

	/* We want to perform the state comparison under the lock:
	 * hif_napi_event() can be called from two different contexts
	 * (driver unload and CPU hotplug notification), and napid->state
	 * may be changed by the unload context while the hotplug context
	 * is reading it, which would be a race condition. Therefore,
	 * perform the napid->state comparison before releasing the lock.
	 */
	state_changed = (prev_state != napid->state);
	qdf_spin_unlock_bh(&(napid->lock));

	if (state_changed) {
		if (napid->state == ENABLE_NAPI_MASK) {
			rc = 1;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: enabling NAPI %d",
						   __func__, i);
					napi_enable(napi);
				}
			}
		} else {
			rc = 0;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: disabling NAPI %d",
						   __func__, i);
					napi_disable(napi);
					/* in case it is affined, remove it */
					qdf_dev_set_irq_affinity(napii->irq,
								 NULL);
				}
			}
		}
	} else {
		HIF_DBG("%s: no change in hif napi state (still %d)",
			__func__, prev_state);
	}

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
qdf_export_symbol(hif_napi_event);
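
/*
 * Usage sketch (hypothetical callers, for illustration only): the bus
 * bandwidth worker would report a throughput transition as
 *
 *	rc = hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE,
 *			    (void *)QCA_NAPI_TPUT_HI);
 *
 * and a vendor command would enable NAPI as
 *
 *	rc = hif_napi_event(hif_ctx, NAPI_EVT_CMD_STATE, (void *)1);
 *
 * Note that the auxiliary argument is passed by value, cast to void *,
 * not as a pointer to data.
 */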

/**
 * hif_napi_enabled() - checks whether NAPI is enabled for a given CE or not
 * @hif_ctx: hif context
 * @ce: CE instance (or -1, to check if any CEs are enabled)
 *
 * Return: bool
 */
int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	if (-1 == ce)
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
	else
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
		      (hif->napi_data.ce_map & (0x01 << ce)));
	return rc;
}
qdf_export_symbol(hif_napi_enabled);

/**
 * hif_napi_created() - checks whether NAPI is created for a given CE or not
 * @hif_ctx: hif context
 * @ce: CE instance
 *
 * Return: bool
 */
bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	rc = (hif->napi_data.ce_map & (0x01 << ce));

	return !!rc;
}
qdf_export_symbol(hif_napi_created);

/**
 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
 * @hif: hif context
 * @id: id of the NAPI instance calling this (used to determine the CE)
 *
 * Return: void
 */
inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	hif_irq_enable(scn, NAPI_ID2PIPE(id));
}

/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx: hif context
 * @ce_id: index of the napi instance
 *
 * Return: false if napi is not enabled or is already scheduled,
 *	   otherwise true
 */
bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
{
	int cpu = smp_processor_id();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napii;

	napii = scn->napi_data.napis[ce_id];
	if (qdf_unlikely(!napii)) {
		HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
			  __func__, ce_id);
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
		NAPI_DEBUG("napi scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
				 NULL, NULL, 0, 0);
	napii->stats[cpu].napi_schedules++;
	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
	napi_schedule(&(napii->napi));

	return true;
}
qdf_export_symbol(hif_napi_schedule);

/**
 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
 * @napi_info: pointer to qca_napi_info for the napi instance
 *
 * Return: true  => interrupt already on correct cpu, no correction needed
 *	   false => interrupt on wrong cpu, correction done for cpu affinity
 *		    of the interrupt
 */
static inline
bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
{
	bool right_cpu = true;
	int rc = 0;
	int cpu;
	struct qca_napi_data *napid;
	QDF_STATUS ret;

	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));

	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {

		cpu = qdf_get_cpu();
		if (unlikely((hif_napi_cpu_blacklist(napid,
						     BLACKLIST_QUERY) > 0) &&
			     (cpu != napi_info->cpu))) {
			right_cpu = false;

			NAPI_DEBUG("interrupt on wrong CPU, correcting");
			napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu);

			irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0);
			ret = qdf_dev_set_irq_affinity(napi_info->irq,
						       (struct qdf_cpu_mask *)
						       &napi_info->cpumask);
			rc = qdf_status_to_os_return(ret);
			irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING);

			if (rc)
				HIF_ERROR("error setting irq affinity hint: %d",
					  rc);
			else
				napi_info->stats[cpu].cpu_corrected++;
		}
	}
	return right_cpu;
}

#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_offld_flush_cb() - Call upper layer flush callback
 * @napi_info: Handle to hif_napi_info
 *
 * Return: None
 */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
	if (napi_info->offld_flush_cb)
		napi_info->offld_flush_cb(napi_info);
}
#else
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
}
#endif

/**
 * hif_napi_poll() - NAPI poll routine
 * @hif_ctx: hif context
 * @napi: pointer to the NAPI struct as the kernel holds it
 * @budget: the maximum amount of work this poll may do
 *
 * This is the body of the poll function.
 * The poll function is called by the kernel. So, there is a wrapper
 * function in HDD, which in turn calls this function.
 * Two main reasons why the whole thing is not implemented in HDD:
 * a) references to things like ce_service that HDD is not aware of
 * b) proximity to the implementation of ce_tasklet, which the body
 *    of this function should be very close to.
 *
 * NOTE TO THE MAINTAINER:
 *   Consider this function and ce_tasklet a very tightly coupled pair.
 *   Any changes to ce_tasklet or this function may likely need to be
 *   reflected in the counterpart.
 *
 * Returns:
 *  int: the amount of work done in this poll (<= budget)
 */
int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
		  struct napi_struct *napi,
		  int budget)
{
	int rc = 0; /* default: no work done, also takes care of error */
	int normalized = 0;
	int bucket;
	int cpu = smp_processor_id();
	bool poll_on_right_cpu;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napi_info;
	struct CE_state *ce_state = NULL;

	if (unlikely(!hif)) {
		HIF_ERROR("%s: hif context is NULL", __func__);
		QDF_ASSERT(0);
		goto out;
	}

	napi_info = (struct qca_napi_info *)
		container_of(napi, struct qca_napi_info, napi);

	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
		   __func__, napi_info->id, napi_info->irq, budget);

	napi_info->stats[cpu].napi_polls++;

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);

	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
		   __func__, rc);

	hif_napi_offld_flush_cb(napi_info);

	/* do not return 0 if there was some work done,
	 * even if it is below the scale
	 */
	if (rc) {
		napi_info->stats[cpu].napi_workdone += rc;
		normalized = (rc / napi_info->scale);
		if (normalized == 0)
			normalized++;
		bucket = (normalized - 1) /
			 (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
			bucket = QCA_NAPI_NUM_BUCKETS - 1;
			HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d) normalized %d, napi budget %d",
				  bucket, QCA_NAPI_NUM_BUCKETS,
				  normalized, QCA_NAPI_BUDGET);
		}
		napi_info->stats[cpu].napi_budget_uses[bucket]++;
	} else {
		/* if ce_per_engine_service reports 0, the poll should be
		 * terminated
		 */
		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
			   __func__, __LINE__);
	}

	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];

	/*
	 * Not using the API hif_napi_correct_cpu directly in the if statement
	 * below, since the API may not get evaluated if placed at the end
	 * and any prior condition already evaluated to true. The CPU
	 * correction check should kick in on every poll.
	 */
#ifdef NAPI_YIELD_BUDGET_BASED
	if (ce_state && (ce_state->force_break || 0 == rc)) {
#else
	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
	if ((ce_state) &&
	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
	     !poll_on_right_cpu)) {
#endif
		napi_info->stats[cpu].napi_completes++;
#ifdef NAPI_YIELD_BUDGET_BASED
		ce_state->force_break = 0;
#endif

		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
					 NULL, NULL, 0, 0);
		if (normalized >= budget)
			normalized = budget - 1;

		napi_complete(napi);
		/* enable interrupts */
		hif_napi_enable_irq(hif_ctx, napi_info->id);
		/* support suspend/resume */
		qdf_atomic_dec(&(hif->active_tasklet_cnt));

		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
			   __func__, __LINE__);
	} else {
		/* 4.4 kernel NAPI implementation requires drivers to
		 * return full work when they ask to be re-scheduled,
		 * or napi_complete and re-start with a fresh interrupt
		 */
		normalized = budget;
	}

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);

	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
	return normalized;
out:
	return rc;
}
qdf_export_symbol(hif_napi_poll);
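
/*
 * Worked example (illustration only) of the bucket arithmetic in
 * hif_napi_poll(), assuming QCA_NAPI_BUDGET = 64, QCA_NAPI_NUM_BUCKETS = 4
 * and scale = 1 (the real values come from the build configuration):
 *
 *	rc = 33 messages  =>  normalized = 33
 *	bucket = (33 - 1) / (64 / 4) = 32 / 16 = 2
 *
 * so the poll is accounted in napi_budget_uses[2], i.e. the third quarter
 * of the budget range.
 */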

void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   int ce_id,
				   int cpu_id)
{
	struct hif_softc *hif;
	struct qca_napi_info *napi_info;
	unsigned long long napi_poll_time = sched_clock() -
					    ce_state->ce_service_start_time;

	hif = ce_state->scn;
	napi_info = hif->napi_data.napis[ce_id];
	if (napi_poll_time >
	    napi_info->stats[cpu_id].napi_max_poll_time)
		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
}
qdf_export_symbol(hif_update_napi_max_poll_time);

#ifdef HIF_IRQ_AFFINITY
/**
 * hif_napi_update_yield_stats() - update NAPI yield related stats
 * @ce_state: CE state of the copy engine whose stats are to be updated
 * @time_limit_reached: indicates whether the time limit was reached
 * @rxpkt_thresh_reached: indicates whether the rx packet threshold was
 *	reached
 *
 * Return: None
 */
void hif_napi_update_yield_stats(struct CE_state *ce_state,
				 bool time_limit_reached,
				 bool rxpkt_thresh_reached)
{
	struct hif_softc *hif;
	struct qca_napi_data *napi_data = NULL;
	int ce_id = 0;
	int cpu_id = 0;

	if (unlikely(!ce_state)) {
		QDF_ASSERT(ce_state);
		return;
	}

	hif = ce_state->scn;

	if (unlikely(!hif)) {
		QDF_ASSERT(hif);
		return;
	}
	napi_data = &(hif->napi_data);
	if (unlikely(!napi_data)) {
		QDF_ASSERT(napi_data);
		return;
	}

	ce_id = ce_state->id;
	cpu_id = qdf_get_cpu();

	if (unlikely(!napi_data->napis[ce_id]))
		return;

	if (time_limit_reached)
		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
	else
		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;

	hif_update_napi_max_poll_time(ce_state, ce_id,
				      cpu_id);
}

/**
 * hif_napi_stats() - display NAPI CPU statistics
 * @napid: pointer to qca_napi_data
 *
 * Description:
 *    Prints the CPU cores on which the NAPI instances'/CEs' interrupts
 *    are being executed. Can be called from outside the NAPI layer.
 *
 * Return: None
 */
void hif_napi_stats(struct qca_napi_data *napid)
{
	int i;
	struct qca_napi_cpu *cpu;

	if (!napid) {
		qdf_debug("%s: napid struct is null", __func__);
		return;
	}

	cpu = napid->napi_cpu;
	qdf_debug("NAPI CPU TABLE");
	qdf_debug("lilclhead=%d, bigclhead=%d",
		  napid->lilcl_head, napid->bigcl_head);
	for (i = 0; i < NR_CPUS; i++) {
		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
			  i,
			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
			  cpu[i].core_mask.bits[0],
			  cpu[i].thread_mask.bits[0],
			  cpu[i].max_freq, cpu[i].napis,
			  cpu[i].cluster_nxt);
	}
}

#ifdef FEATURE_NAPI_DEBUG
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	hif_napi_stats(napid);
}
#else
static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }
#endif /* FEATURE_NAPI_DEBUG */

/**
 * hnc_link_clusters() - partitions the cpu table into clusters
 * @napid: pointer to NAPI data
 *
 * Takes in a CPU topology table and builds two linked lists
 * (big cluster cores, list-head at bigcl_head, and little cluster
 * cores, list-head at lilcl_head) out of it.
 *
 * If there are more than two clusters:
 * - bigcl_head and lilcl_head will be different,
 * - the cluster with the highest cpufreq will be considered the "big"
 *   cluster. If there is more than one cluster with the highest frequency,
 *   the *last* such cluster will be designated as the "big" cluster.
 * - the cluster with the lowest cpufreq will be considered the "li'l"
 *   cluster. If there is more than one cluster with the lowest cpufreq,
 *   the *first* such cluster will be designated as the "little" cluster.
 * - we only support up to 32 clusters.
 *
 * Return: 0 : OK
 *	   !0: error (at least one of lil/big clusters could not be found)
 */
#define HNC_MIN_CLUSTER 0
#define HNC_MAX_CLUSTER 1
static int hnc_link_clusters(struct qca_napi_data *napid)
{
	int rc = 0;

	int i;
	int it = 0;
	uint32_t cl_done = 0x0;
	int cl, curcl, curclhead = 0;
	int more;
	unsigned int lilfrq = INT_MAX;
	unsigned int bigfrq = 0;
	unsigned int clfrq = 0;
	int prev = 0;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	napid->lilcl_head = napid->bigcl_head = -1;

	do {
		more = 0;
		it++;
		curcl = -1;
		for (i = 0; i < NR_CPUS; i++) {
			cl = cpus[i].cluster_id;
			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
				   i, cl);
			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
				/* continue if ASSERTs are disabled */
				continue;
			}
			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
				NAPI_DEBUG("Core mask 0. SKIPPED\n");
				continue;
			}
			if (cl_done & (0x01 << cl)) {
				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
				continue;
			} else {
				if (more == 0) {
					more = 1;
					curcl = cl;
					curclhead = i; /* row */
					clfrq = cpus[i].max_freq;
					prev = -1;
				}
				if ((curcl >= 0) && (curcl != cl)) {
					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
						   cl, curcl);
					continue;
				}
				if (cpus[i].max_freq != clfrq)
					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
						   cpus[i].max_freq, clfrq);
				if (clfrq >= bigfrq) {
					bigfrq = clfrq;
					napid->bigcl_head = curclhead;
					NAPI_DEBUG("bigcl=%d\n", curclhead);
				}
				if (clfrq < lilfrq) {
					lilfrq = clfrq;
					napid->lilcl_head = curclhead;
					NAPI_DEBUG("lilcl=%d\n", curclhead);
				}
				if (prev != -1)
					cpus[prev].cluster_nxt = i;

				prev = i;
			}
		}
		if (curcl >= 0)
			cl_done |= (0x01 << curcl);

	} while (more);

	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
		rc = -EFAULT;

	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
	return rc;
}
#undef HNC_MIN_CLUSTER
#undef HNC_MAX_CLUSTER
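
/*
 * Illustration (hypothetical topology, for reference only): on a 4+4
 * big.LITTLE system with little cores 0-3 (low max_freq) and big cores
 * 4-7 (high max_freq), hnc_link_clusters() would yield:
 *
 *	lilcl_head = 0, cluster_nxt chain: 0 -> 1 -> 2 -> 3 -> -1
 *	bigcl_head = 4, cluster_nxt chain: 4 -> 5 -> 6 -> 7 -> -1
 *
 * hncm_dest_cpu() below walks these chains to pick collapse/disperse
 * destinations.
 */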

/*
 * hotplug function group
 */

/**
 * hnc_cpu_online_cb() - handles CPU hotplug "up" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * Return: None
 */
static void hnc_cpu_online_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
	NAPI_DEBUG("%s: CPU %u marked %d",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
 * @context: the associated HIF context
 * @cpu: the CPU Id of the CPU the event happened on
 *
 * On transition to offline, we act on PREP events, because we may need to
 * move the irqs/NAPIs to another CPU before it is actually off-lined.
 *
 * Return: None
 */
static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	struct hif_softc *hif = context;
	struct qca_napi_data *napid = &hif->napi_data;

	if (cpu >= NR_CPUS)
		return;

	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);

	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;

	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
		   __func__, cpu, napid->napi_cpu[cpu].state);

	/* We need to move any NAPIs on this CPU out.
	 * If we are in LO throughput mode, this is only valid
	 * if the CPU is the designated low CPU.
	 */
	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
		       NAPI_EVT_CPU_STATE,
		       (void *)
		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));

	NAPI_DEBUG("<--%s", __func__);
}

static int hnc_hotplug_register(struct hif_softc *hif_sc)
{
	QDF_STATUS status;

	NAPI_DEBUG("-->%s", __func__);

	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
				    hif_sc,
				    hnc_cpu_online_cb,
				    hnc_cpu_before_offline_cb);

	NAPI_DEBUG("<--%s [%d]", __func__, status);

	return qdf_status_to_os_return(status);
}

static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
{
	NAPI_DEBUG("-->%s", __func__);

	if (hif_sc->napi_data.cpuhp_handler)
		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);

	NAPI_DEBUG("<--%s", __func__);
}

/**
 * hnc_tput_hook() - installs a callback in the throughput detector
 * @install: !0 => install; =0: uninstall
 *
 * Installs a callback to be called when wifi driver throughput (tx+rx)
 * crosses a threshold. Currently, we are using the same criteria as
 * TCP ack suppression (500 packets/100ms by default).
 *
 * Return: 0 : success
 *	   <0: failure
 */
static int hnc_tput_hook(int install)
{
	int rc = 0;

	/*
	 * Nothing to do until the bw_calculation accepts registration;
	 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk
	 * hdd_napi_throughput_policy(...)
	 */
	return rc;
}

/*
 * Implementation of hif_napi_cpu API
 */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
	cpumask_copy(&(cpus[i].thread_mask),
		     topology_sibling_cpumask(i));
}
#else
static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
{
}
#endif

/**
 * hif_napi_cpu_init() - initialization of the irq affinity block
 * @hif: pointer to hif context
 *
 * Called by hif_napi_create, when the first instance is created
 * - builds the napi_rss_cpus table from the cpu topology
 * - links cores of the same clusters together
 * - installs a hot-plug notifier
 * - installs a throughput trigger notifier (when such a mechanism exists)
 *
 * Return: 0: OK
 *	   <0: error code
 */
int hif_napi_cpu_init(struct hif_opaque_softc *hif)
{
	int rc = 0;
	int i;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	NAPI_DEBUG("--> ");

	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
		NAPI_DEBUG("NAPI RSS table already initialized.\n");
		rc = -EALREADY;
		goto lab_rss_init;
	}

	/* build CPU topology table */
	for_each_possible_cpu(i) {
		cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
				  ? QCA_NAPI_CPU_UP
				  : QCA_NAPI_CPU_DOWN));
		cpus[i].core_id = topology_core_id(i);
		cpus[i].cluster_id = topology_physical_package_id(i);
		cpumask_copy(&(cpus[i].core_mask),
			     topology_core_cpumask(i));
		record_sibling_cpumask(cpus, i);
		cpus[i].max_freq = cpufreq_quick_get_max(i);
		cpus[i].napis = 0x0;
		cpus[i].cluster_nxt = -1; /* invalid */
	}

	/* link clusters together */
	rc = hnc_link_clusters(napid);
	if (0 != rc)
		goto lab_err_topology;

	/* install hotplug notifier */
	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
	if (0 != rc)
		goto lab_err_hotplug;

	/* install throughput notifier */
	rc = hnc_tput_hook(1);
	if (0 == rc)
		goto lab_rss_init;

lab_err_hotplug:
	hnc_tput_hook(0);
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
lab_err_topology:
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init:
	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hif_napi_cpu_deinit() - clean-up of the irq affinity block
 * @hif: pointer to hif context
 *
 * Called by hif_napi_destroy, when the last instance is removed
 * - uninstalls the throughput and hotplug notifiers
 * - clears the cpu topology table
 *
 * Return: 0: OK
 */
int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
{
	int rc = 0;
	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;

	NAPI_DEBUG("-->%s(...)", __func__);

	/* uninstall tput notifier */
	rc = hnc_tput_hook(0);

	/* uninstall hotplug notifier */
	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));

	/* clear the topology table */
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);

	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);

	return rc;
}

/**
 * hncm_migrate_to() - migrates a NAPI to a CPU
 * @napid: pointer to NAPI block
 * @napi_ce: CE id of the NAPI instance
 * @didx: index in the CPU topology table for the CPU to migrate to
 *
 * Migrates NAPI (identified by the CE id) to the destination core.
 * Updates the napi_map of the destination entry.
 *
 * Return: =0 : success
 *	   <0 : error
 */
static int hncm_migrate_to(struct qca_napi_data *napid,
			   int napi_ce,
			   int didx)
{
	int rc = 0;
	QDF_STATUS status;

	NAPI_DEBUG("-->%s(napi_ce=%d, didx=%d)", __func__, napi_ce, didx);

	if (!napid->napis[napi_ce])
		return -EINVAL;

	napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx);

	irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0);
	status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq,
					  (struct qdf_cpu_mask *)
					  &napid->napis[napi_ce]->cpumask);
	rc = qdf_status_to_os_return(status);

	/* unmark the napis bitmap in the cpu table */
	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &=
						~(0x01 << napi_ce);
	/* mark the napis bitmap for the new designated cpu */
	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
	napid->napis[napi_ce]->cpu = didx;

	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}

/**
 * hncm_dest_cpu() - finds a destination CPU for NAPI
 * @napid: pointer to NAPI block
 * @act: RELOCATE | COLLAPSE | DISPERSE
 *
 * Finds the designated destination for the next IRQ.
 * RELOCATE: translated to either COLLAPSE or DISPERSE based
 *           on napid->napi_mode (throughput state)
 * COLLAPSE: all have the same destination: the first online CPU in lilcl
 * DISPERSE: the CPU in bigcl which hosts the smallest number of NAPIs
 *
 * Return: >=0 : index in the cpu topology table
 *	   < 0 : error
 */
static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
{
	int destidx = -1;
	int head, i;

	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
	if (act == HNC_ACT_RELOCATE) {
		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
			act = HNC_ACT_COLLAPSE;
		else
			act = HNC_ACT_DISPERSE;
		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
			   __func__, act);
	}
	if (act == HNC_ACT_COLLAPSE) {
		head = i = napid->lilcl_head;
retry_collapse:
		while (i >= 0) {
			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
				destidx = i;
				break;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		if ((destidx < 0) && (head == napid->lilcl_head)) {
			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
				   __func__);
			head = i = napid->bigcl_head;
			goto retry_collapse;
		}
	} else { /* HNC_ACT_DISPERSE */
		int smallest = 99; /* larger than any valid hweight32() */
		int smallidx = -1;

		head = i = napid->bigcl_head;
retry_disperse:
		while (i >= 0) {
			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
			    (hweight32(napid->napi_cpu[i].napis) <=
			     smallest)) {
				smallest =
					hweight32(napid->napi_cpu[i].napis);
				smallidx = i;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		/* check if the result matches the user-specified CPU mask */
		if (smallidx >= 0)
			smallidx = ((1 << smallidx) &
				    napid->user_cpu_affin_mask) ?
				   smallidx : -1;

		if ((smallidx < 0) && (head == napid->bigcl_head)) {
			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
				   __func__);
			head = i = napid->lilcl_head;
			goto retry_disperse;
		}
		destidx = smallidx;
	}
	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
	return destidx;
}
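
/*
 * Illustration (hypothetical state, for reference only) of destination
 * selection: with bigcl cores {4,5,6,7} hosting 2, 0, 1 and 3 NAPIs
 * respectively, all cores online and no restricting user CPU mask,
 *
 *	hncm_dest_cpu(napid, HNC_ACT_DISPERSE) -> 5 (fewest NAPIs in bigcl)
 *	hncm_dest_cpu(napid, HNC_ACT_COLLAPSE) -> first online core in lilcl
 *	hncm_dest_cpu(napid, HNC_ACT_RELOCATE) -> one of the above, depending
 *						  on napid->napi_mode
 */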

/**
 * hif_napi_cpu_migrate() - migrate IRQs away
 * @napid: pointer to NAPI block
 * @cpu: -1: all CPUs; <n>: specific CPU
 * @action: COLLAPSE | DISPERSE
 *
 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
 * cores. Eligible cores are:
 *   act=COLLAPSE -> the first online core of the little cluster
 *   act=DISPERSE -> separate cores of the big cluster, so that each core
 *                   will host a minimum number of NAPIs/IRQs
 *                   (napid->cpus[cpu].napis)
 *
 * Note that this function is called with a spinlock acquired already.
 *
 * Return: =0: success
 *	   <0: error
 */
int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
{
	int rc = 0;
	struct qca_napi_cpu *cpup;
	int i, dind;
	uint32_t napis;

	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
		   __func__, cpu, action);
	/* the following is really: hif_napi_enabled() with less overhead */
	if (napid->ce_map == 0) {
		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
		goto hncm_return;
	}

	cpup = napid->napi_cpu;

	switch (action) {
	case HNC_ACT_RELOCATE:
	case HNC_ACT_DISPERSE:
	case HNC_ACT_COLLAPSE: {
		/* first find the src napi set */
		if (cpu == HNC_ANY_CPU)
			napis = napid->ce_map;
		else
			napis = cpup[cpu].napis;
		/* then clear the napi bitmap on each CPU */
		for (i = 0; i < NR_CPUS; i++)
			cpup[i].napis = 0;
		/* then for each of the NAPIs to disperse: */
		for (i = 0; i < CE_COUNT_MAX; i++) {
			if (napis & (1 << i)) {
				/* find a destination CPU */
				dind = hncm_dest_cpu(napid, action);
				if (dind >= 0) {
					NAPI_DEBUG("Migrating NAPI ce%d to %d",
						   i, dind);
					rc = hncm_migrate_to(napid, i, dind);
				} else {
					NAPI_DEBUG("No dest for NAPI ce%d", i);
					hnc_dump_cpus(napid);
					rc = -1;
				}
			}
		}
		break;
	}
	default: {
		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
		QDF_BUG(0);
		break;
	}
	} /* switch action */

hncm_return:
	hnc_dump_cpus(napid);
	return rc;
}

/**
 * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting
 * @napid: pointer to qca_napi_data structure
 * @bl_flag: blacklist flag to enable/disable blacklisting
 *
 * The function enables/disables blacklisting for all the copy engine
 * interrupts on which NAPI is enabled.
 *
 * Return: None
 */
static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
{
	int i;
	struct qca_napi_info *napii;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		/* check if NAPI is enabled on the CE */
		if (!(napid->ce_map & (0x01 << i)))
			continue;

		/* double check that NAPI is allocated for the CE */
		napii = napid->napis[i];
		if (!napii)
			continue;

		if (bl_flag)
			irq_modify_status(napii->irq,
					  0, IRQ_NO_BALANCING);
		else
			irq_modify_status(napii->irq,
					  IRQ_NO_BALANCING, 0);
		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
	}
}

#ifdef CONFIG_SCHED_CORE_CTL
/* Enable this API only if the kernel feature CONFIG_SCHED_CORE_CTL is
 * defined
 */
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return core_ctl_set_boost(boost);
}
#else
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif

/**
 * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts
 * @napid: pointer to qca_napi_data structure
 * @op: blacklist operation to perform
 *
 * The function enables/disables/queries blacklisting for all CE RX
 * interrupts with NAPI enabled. Besides blacklisting, it also
 * enables/disables core_ctl_set_boost.
 * Once blacklisting is enabled, the interrupts will not be managed by the
 * IRQ balancer.
 *
 * Return: -EINVAL, if the IRQ_BLACKLISTING and CORE_CTL_BOOST features are
 *		    not enabled
 *	   for the BLACKLIST_QUERY op - the blacklist refcount
 *	   for the BLACKLIST_ON op    - the return value of core_ctl_set_boost
 *	   for the BLACKLIST_OFF op   - the return value of core_ctl_set_boost
 */
int hif_napi_cpu_blacklist(struct qca_napi_data *napid,
			   enum qca_blacklist_op op)
{
	int rc = 0;
	static int ref_count; /* = 0 by the compiler */
	uint8_t flags = napid->flags;
	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;

	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);

	if (!(bl_en && ccb_en)) {
		rc = -EINVAL;
		goto out;
	}

	switch (op) {
	case BLACKLIST_QUERY:
		rc = ref_count;
		break;
	case BLACKLIST_ON:
		ref_count++;
		rc = 0;
		if (ref_count == 1) {
			rc = hif_napi_core_ctl_set_boost(true);
			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
				   rc, ref_count);
			hif_napi_bl_irq(napid, true);
		}
		break;
	case BLACKLIST_OFF:
		if (ref_count) {
			ref_count--;
			rc = 0;
			if (ref_count == 0) {
				rc = hif_napi_core_ctl_set_boost(false);
				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
					   rc, ref_count);
				hif_napi_bl_irq(napid, false);
			}
		}
		break;
	default:
		NAPI_DEBUG("Invalid blacklist op: %d", op);
		rc = -EINVAL;
	} /* switch */
out:
	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}

/**
 * hif_napi_serialize() - [de-]serialize NAPI operations
 * @hif: hif context
 * @is_on: 1: serialize, 0: deserialize
 *
 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform
 * the following steps (see hif_napi_event for the code):
 * - put the irqs of all NAPI instances on the same CPU
 * - only for the first serialize call: blacklist
 *
 * hif_napi_serialize(hif, 0):
 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
 * - at the end of the timer, check the current throughput state and
 *   implement it.
 *
 * Return: the return value of hif_napi_event(), or -EINVAL if @hif is NULL
 */
static unsigned long napi_serialize_reqs;
int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
{
	int rc = -EINVAL;

	if (hif)
		switch (is_on) {
		case 0: { /* de-serialize */
			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
					    (void *) 0);
			napi_serialize_reqs = 0;
			break;
		} /* end de-serialize */
		case 1: { /* serialize */
			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
					    (void *)napi_serialize_reqs++);
			break;
		} /* end serialize */
		default:
			break; /* no-op */
		} /* switch */
	return rc;
}

#endif /* ifdef HIF_IRQ_AFFINITY */
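
/*
 * Usage sketch (hypothetical roaming-path caller, for illustration only):
 *
 *	hif_napi_serialize(hif_ctx, 1);	// collapse IRQs, blacklist balancer
 *	... latency-sensitive peer management work ...
 *	hif_napi_serialize(hif_ctx, 0);	// resume throughput-driven policy
 *
 * Repeated serialize(1) calls are safe: the blacklist is reference-counted,
 * so the IRQs are simply re-collapsed onto the single core.
 */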