1 /* 2 * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: hif_napi.c 21 * 22 * HIF NAPI interface implementation 23 */ 24 25 #include <linux/string.h> /* memset */ 26 27 /* Linux headers */ 28 #include <linux/cpumask.h> 29 #include <linux/cpufreq.h> 30 #include <linux/cpu.h> 31 #include <linux/topology.h> 32 #include <linux/interrupt.h> 33 #include <linux/irq.h> 34 #ifdef CONFIG_SCHED_CORE_CTL 35 #include <linux/sched/core_ctl.h> 36 #endif 37 #include <pld_common.h> 38 #include <linux/pm.h> 39 40 /* Driver headers */ 41 #include <hif_napi.h> 42 #include <hif_debug.h> 43 #include <hif_io32.h> 44 #include <ce_api.h> 45 #include <ce_internal.h> 46 #include <hif_irq_affinity.h> 47 #include "qdf_cpuhp.h" 48 #include "qdf_module.h" 49 #include "qdf_net_if.h" 50 #include "qdf_dev.h" 51 52 enum napi_decision_vector { 53 HIF_NAPI_NOEVENT = 0, 54 HIF_NAPI_INITED = 1, 55 HIF_NAPI_CONF_UP = 2 56 }; 57 #define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP) 58 59 #ifdef RECEIVE_OFFLOAD 60 /** 61 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI 62 * @napi: Rx_thread NAPI 63 * @budget: NAPI BUDGET 64 * 65 * Return: 0 as it is not supposed to be polled at all as it is not scheduled. 66 */ 67 static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget) 68 { 69 HIF_ERROR("This napi_poll should not be polled as we don't schedule it"); 70 QDF_ASSERT(0); 71 return 0; 72 } 73 74 /** 75 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI 76 * @napii: Handle to napi_info holding rx_thread napi 77 * 78 * Return: None 79 */ 80 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 81 { 82 init_dummy_netdev(&napii->rx_thread_netdev); 83 netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi, 84 hif_rxthread_napi_poll, 64); 85 napi_enable(&napii->rx_thread_napi); 86 } 87 88 /** 89 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI 90 * @napii: Handle to napi_info holding rx_thread napi 91 * 92 * Return: None 93 */ 94 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 95 { 96 netif_napi_del(&napii->rx_thread_napi); 97 } 98 #else /* RECEIVE_OFFLOAD */ 99 static void hif_init_rx_thread_napi(struct qca_napi_info *napii) 100 { 101 } 102 103 static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii) 104 { 105 } 106 #endif 107 108 /** 109 * hif_napi_create() - creates the NAPI structures for a given CE 110 * @hif : pointer to hif context 111 * @pipe_id: the CE id on which the instance will be created 112 * @poll : poll function to be used for this NAPI instance 113 * @budget : budget to be registered with the NAPI instance 114 * @scale : scale factor on the weight (to scaler budget to 1000) 115 * @flags : feature flags 116 * 117 * Description: 118 * Creates NAPI instances. This function is called 119 * unconditionally during initialization. It creates 120 * napi structures through the proper HTC/HIF calls. 121 * The structures are disabled on creation. 122 * Note that for each NAPI instance a separate dummy netdev is used 123 * 124 * Return: 125 * < 0: error 126 * = 0: <should never happen> 127 * > 0: id of the created object (for multi-NAPI, number of objects created) 128 */ 129 int hif_napi_create(struct hif_opaque_softc *hif_ctx, 130 int (*poll)(struct napi_struct *, int), 131 int budget, 132 int scale, 133 uint8_t flags) 134 { 135 int i; 136 struct qca_napi_data *napid; 137 struct qca_napi_info *napii; 138 struct CE_state *ce_state; 139 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 140 int rc = 0; 141 142 NAPI_DEBUG("-->(budget=%d, scale=%d)", 143 budget, scale); 144 NAPI_DEBUG("hif->napi_data.state = 0x%08x", 145 hif->napi_data.state); 146 NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x", 147 hif->napi_data.ce_map); 148 149 napid = &(hif->napi_data); 150 if (0 == (napid->state & HIF_NAPI_INITED)) { 151 memset(napid, 0, sizeof(struct qca_napi_data)); 152 qdf_spinlock_create(&(napid->lock)); 153 154 napid->state |= HIF_NAPI_INITED; 155 napid->flags = flags; 156 157 rc = hif_napi_cpu_init(hif_ctx); 158 if (rc != 0 && rc != -EALREADY) { 159 HIF_ERROR("NAPI_initialization failed,. %d", rc); 160 rc = napid->ce_map; 161 goto hnc_err; 162 } else 163 rc = 0; 164 165 HIF_DBG("%s: NAPI structures initialized, rc=%d", 166 __func__, rc); 167 } 168 for (i = 0; i < hif->ce_count; i++) { 169 ce_state = hif->ce_id_to_state[i]; 170 NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d", 171 i, ce_state->htt_rx_data, 172 ce_state->htt_tx_data); 173 if (ce_srng_based(hif)) 174 continue; 175 176 if (!ce_state->htt_rx_data) 177 continue; 178 179 /* Now this is a CE where we need NAPI on */ 180 NAPI_DEBUG("Creating NAPI on pipe %d", i); 181 napii = qdf_mem_malloc(sizeof(*napii)); 182 napid->napis[i] = napii; 183 if (!napii) { 184 NAPI_DEBUG("NAPI alloc failure %d", i); 185 rc = -ENOMEM; 186 goto napii_free; 187 } 188 } 189 190 for (i = 0; i < hif->ce_count; i++) { 191 napii = napid->napis[i]; 192 if (!napii) 193 continue; 194 195 NAPI_DEBUG("initializing NAPI for pipe %d", i); 196 memset(napii, 0, sizeof(struct qca_napi_info)); 197 napii->scale = scale; 198 napii->id = NAPI_PIPE2ID(i); 199 napii->hif_ctx = hif_ctx; 200 napii->irq = pld_get_irq(hif->qdf_dev->dev, i); 201 202 if (napii->irq < 0) 203 HIF_WARN("%s: bad IRQ value for CE %d: %d", 204 __func__, i, napii->irq); 205 206 init_dummy_netdev(&(napii->netdev)); 207 208 NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)", 209 &(napii->napi), &(napii->netdev), poll, budget); 210 netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget); 211 212 NAPI_DEBUG("after napi_add"); 213 NAPI_DEBUG("napi=0x%pK, netdev=0x%pK", 214 &(napii->napi), &(napii->netdev)); 215 NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK", 216 napii->napi.dev_list.prev, 217 napii->napi.dev_list.next); 218 NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK", 219 napii->netdev.napi_list.prev, 220 napii->netdev.napi_list.next); 221 222 hif_init_rx_thread_napi(napii); 223 napii->lro_ctx = qdf_lro_init(); 224 NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n", 225 i, napii->id, napii->lro_ctx); 226 227 /* It is OK to change the state variable below without 228 * protection as there should be no-one around yet 229 */ 230 napid->ce_map |= (0x01 << i); 231 HIF_DBG("%s: NAPI id %d created for pipe %d", __func__, 232 napii->id, i); 233 } 234 235 /* no ces registered with the napi */ 236 if (!ce_srng_based(hif) && napid->ce_map == 0) { 237 HIF_WARN("%s: no napis created for copy engines", __func__); 238 rc = -EFAULT; 239 goto napii_free; 240 } 241 242 NAPI_DEBUG("napi map = %x", napid->ce_map); 243 NAPI_DEBUG("NAPI ids created for all applicable pipes"); 244 return napid->ce_map; 245 246 napii_free: 247 for (i = 0; i < hif->ce_count; i++) { 248 napii = napid->napis[i]; 249 napid->napis[i] = NULL; 250 if (napii) 251 qdf_mem_free(napii); 252 } 253 254 hnc_err: 255 NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map); 256 return rc; 257 } 258 qdf_export_symbol(hif_napi_create); 259 260 #ifdef RECEIVE_OFFLOAD 261 void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl, 262 void (offld_flush_handler)(void *)) 263 { 264 int i; 265 struct CE_state *ce_state; 266 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 267 struct qca_napi_data *napid; 268 struct qca_napi_info *napii; 269 270 if (!scn) { 271 HIF_ERROR("%s: hif_state NULL!", __func__); 272 QDF_ASSERT(0); 273 return; 274 } 275 276 napid = hif_napi_get_all(hif_hdl); 277 for (i = 0; i < scn->ce_count; i++) { 278 ce_state = scn->ce_id_to_state[i]; 279 if (ce_state && (ce_state->htt_rx_data)) { 280 napii = napid->napis[i]; 281 napii->offld_flush_cb = offld_flush_handler; 282 HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %p\n", 283 i, napii->id, napii->offld_flush_cb); 284 } 285 } 286 } 287 288 void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl) 289 { 290 int i; 291 struct CE_state *ce_state; 292 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 293 struct qca_napi_data *napid; 294 struct qca_napi_info *napii; 295 296 if (!scn) { 297 HIF_ERROR("%s: hif_state NULL!", __func__); 298 QDF_ASSERT(0); 299 return; 300 } 301 302 napid = hif_napi_get_all(hif_hdl); 303 for (i = 0; i < scn->ce_count; i++) { 304 ce_state = scn->ce_id_to_state[i]; 305 if (ce_state && (ce_state->htt_rx_data)) { 306 napii = napid->napis[i]; 307 HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n", 308 i, napii->id, napii->offld_flush_cb); 309 /* Not required */ 310 napii->offld_flush_cb = NULL; 311 } 312 } 313 } 314 #endif /* RECEIVE_OFFLOAD */ 315 316 /** 317 * 318 * hif_napi_destroy() - destroys the NAPI structures for a given instance 319 * @hif : pointer to hif context 320 * @ce_id : the CE id whose napi instance will be destroyed 321 * @force : if set, will destroy even if entry is active (de-activates) 322 * 323 * Description: 324 * Destroy a given NAPI instance. This function is called 325 * unconditionally during cleanup. 326 * Refuses to destroy an entry of it is still enabled (unless force=1) 327 * Marks the whole napi_data invalid if all instances are destroyed. 328 * 329 * Return: 330 * -EINVAL: specific entry has not been created 331 * -EPERM : specific entry is still active 332 * 0 < : error 333 * 0 = : success 334 */ 335 int hif_napi_destroy(struct hif_opaque_softc *hif_ctx, 336 uint8_t id, 337 int force) 338 { 339 uint8_t ce = NAPI_ID2PIPE(id); 340 int rc = 0; 341 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 342 343 NAPI_DEBUG("-->(id=%d, force=%d)", id, force); 344 345 if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) { 346 HIF_ERROR("%s: NAPI not initialized or entry %d not created", 347 __func__, id); 348 rc = -EINVAL; 349 } else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) { 350 HIF_ERROR("%s: NAPI instance %d (pipe %d) not created", 351 __func__, id, ce); 352 if (hif->napi_data.napis[ce]) 353 HIF_ERROR("%s: memory allocated but ce_map not set %d (pipe %d)", 354 __func__, id, ce); 355 rc = -EINVAL; 356 } else { 357 struct qca_napi_data *napid; 358 struct qca_napi_info *napii; 359 360 napid = &(hif->napi_data); 361 napii = napid->napis[ce]; 362 if (!napii) { 363 if (napid->ce_map & (0x01 << ce)) 364 HIF_ERROR("%s: napii & ce_map out of sync(ce %d)", 365 __func__, ce); 366 return -EINVAL; 367 } 368 369 370 if (hif->napi_data.state == HIF_NAPI_CONF_UP) { 371 if (force) { 372 napi_disable(&(napii->napi)); 373 HIF_DBG("%s: NAPI entry %d force disabled", 374 __func__, id); 375 NAPI_DEBUG("NAPI %d force disabled", id); 376 } else { 377 HIF_ERROR("%s: Cannot destroy active NAPI %d", 378 __func__, id); 379 rc = -EPERM; 380 } 381 } 382 if (0 == rc) { 383 NAPI_DEBUG("before napi_del"); 384 NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK", 385 napii->napi.dev_list.prev, 386 napii->napi.dev_list.next); 387 NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK", 388 napii->netdev.napi_list.prev, 389 napii->netdev.napi_list.next); 390 391 qdf_lro_deinit(napii->lro_ctx); 392 netif_napi_del(&(napii->napi)); 393 hif_deinit_rx_thread_napi(napii); 394 395 napid->ce_map &= ~(0x01 << ce); 396 napid->napis[ce] = NULL; 397 napii->scale = 0; 398 qdf_mem_free(napii); 399 HIF_DBG("%s: NAPI %d destroyed\n", __func__, id); 400 401 /* if there are no active instances and 402 * if they are all destroyed, 403 * set the whole structure to uninitialized state 404 */ 405 if (napid->ce_map == 0) { 406 rc = hif_napi_cpu_deinit(hif_ctx); 407 /* caller is tolerant to receiving !=0 rc */ 408 409 qdf_spinlock_destroy(&(napid->lock)); 410 memset(napid, 411 0, sizeof(struct qca_napi_data)); 412 HIF_DBG("%s: no NAPI instances. Zapped.", 413 __func__); 414 } 415 } 416 } 417 418 return rc; 419 } 420 qdf_export_symbol(hif_napi_destroy); 421 422 #ifdef FEATURE_LRO 423 void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id) 424 { 425 struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl); 426 struct qca_napi_data *napid; 427 struct qca_napi_info *napii; 428 429 napid = &(scn->napi_data); 430 napii = napid->napis[NAPI_ID2PIPE(napi_id)]; 431 432 if (napii) 433 return napii->lro_ctx; 434 return 0; 435 } 436 #endif 437 438 /** 439 * 440 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure 441 * @hif: pointer to hif context 442 * 443 * Description: 444 * Returns the address of the whole structure 445 * 446 * Return: 447 * <addr>: address of the whole HIF NAPI structure 448 */ 449 inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx) 450 { 451 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 452 453 return &(hif->napi_data); 454 } 455 456 struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid) 457 { 458 int id = NAPI_ID2PIPE(napi_id); 459 460 return napid->napis[id]; 461 } 462 463 /** 464 * 465 * hif_napi_event() - reacts to events that impact NAPI 466 * @hif : pointer to hif context 467 * @evnt: event that has been detected 468 * @data: more data regarding the event 469 * 470 * Description: 471 * This function handles two types of events: 472 * 1- Events that change the state of NAPI (enabled/disabled): 473 * {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE} 474 * The state is retrievable by "hdd_napi_enabled(-1)" 475 * - NAPI will be on if either INI file is on and it has not been disabled 476 * by a subsequent vendor CMD, 477 * or it has been enabled by a vendor CMD. 478 * 2- Events that change the CPU affinity of a NAPI instance/IRQ: 479 * {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE} 480 * - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode 481 * - NAPI will switch throughput mode based on hdd_napi_throughput_policy() 482 * - In LO tput mode, NAPI will yield control if its interrupts to the system 483 * management functions. However in HI throughput mode, NAPI will actively 484 * manage its interrupts/instances (by trying to disperse them out to 485 * separate performance cores). 486 * - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events. 487 * 488 * + In some cases (roaming peer management is the only case so far), a 489 * a client can trigger a "SERIALIZE" event. Basically, this means that the 490 * users is asking NAPI to go into a truly single execution context state. 491 * So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted, 492 * (if called for the first time) and then moves all IRQs (for NAPI 493 * instances) to be collapsed to a single core. If called multiple times, 494 * it will just re-collapse the CPUs. This is because blacklist-on() API 495 * is reference-counted, and because the API has already been called. 496 * 497 * Such a user, should call "DESERIALIZE" (NORMAL) event, to set NAPI to go 498 * to its "normal" operation. Optionally, they can give a timeout value (in 499 * multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this 500 * case, NAPI will just set the current throughput state to uninitialized 501 * and set the delay period. Once policy handler is called, it would skip 502 * applying the policy delay period times, and otherwise apply the policy. 503 * 504 * Return: 505 * < 0: some error 506 * = 0: event handled successfully 507 */ 508 int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event, 509 void *data) 510 { 511 int rc = 0; 512 uint32_t prev_state; 513 int i; 514 bool state_changed; 515 struct napi_struct *napi; 516 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 517 struct qca_napi_data *napid = &(hif->napi_data); 518 enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED; 519 enum { 520 BLACKLIST_NOT_PENDING, 521 BLACKLIST_ON_PENDING, 522 BLACKLIST_OFF_PENDING 523 } blacklist_pending = BLACKLIST_NOT_PENDING; 524 525 NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data); 526 527 if (ce_srng_based(hif)) 528 return hif_exec_event(hif_ctx, event, data); 529 530 if ((napid->state & HIF_NAPI_INITED) == 0) { 531 NAPI_DEBUG("%s: got event when NAPI not initialized", 532 __func__); 533 return -EINVAL; 534 } 535 qdf_spin_lock_bh(&(napid->lock)); 536 prev_state = napid->state; 537 switch (event) { 538 case NAPI_EVT_INI_FILE: 539 case NAPI_EVT_CMD_STATE: 540 case NAPI_EVT_INT_STATE: { 541 int on = (data != ((void *)0)); 542 543 HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)", 544 __func__, event, 545 on, prev_state); 546 if (on) 547 if (prev_state & HIF_NAPI_CONF_UP) { 548 HIF_DBG("%s: duplicate NAPI conf ON msg", 549 __func__); 550 } else { 551 HIF_DBG("%s: setting state to ON", 552 __func__); 553 napid->state |= HIF_NAPI_CONF_UP; 554 } 555 else /* off request */ 556 if (prev_state & HIF_NAPI_CONF_UP) { 557 HIF_DBG("%s: setting state to OFF", 558 __func__); 559 napid->state &= ~HIF_NAPI_CONF_UP; 560 } else { 561 HIF_DBG("%s: duplicate NAPI conf OFF msg", 562 __func__); 563 } 564 break; 565 } 566 /* case NAPI_INIT_FILE/CMD_STATE */ 567 568 case NAPI_EVT_CPU_STATE: { 569 int cpu = ((unsigned long int)data >> 16); 570 int val = ((unsigned long int)data & 0x0ff); 571 572 NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d", 573 __func__, cpu, val); 574 575 /* state has already been set by hnc_cpu_notify_cb */ 576 if ((val == QCA_NAPI_CPU_DOWN) && 577 (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */ 578 (napid->napi_cpu[cpu].napis != 0)) { 579 NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d", 580 __func__, cpu); 581 rc = hif_napi_cpu_migrate(napid, 582 cpu, 583 HNC_ACT_RELOCATE); 584 napid->napi_cpu[cpu].napis = 0; 585 } 586 /* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */ 587 break; 588 } 589 590 case NAPI_EVT_TPUT_STATE: { 591 tput_mode = (enum qca_napi_tput_state)data; 592 if (tput_mode == QCA_NAPI_TPUT_LO) { 593 /* from TPUT_HI -> TPUT_LO */ 594 NAPI_DEBUG("%s: Moving to napi_tput_LO state", 595 __func__); 596 blacklist_pending = BLACKLIST_OFF_PENDING; 597 /* 598 * Ideally we should "collapse" interrupts here, since 599 * we are "dispersing" interrupts in the "else" case. 600 * This allows the possibility that our interrupts may 601 * still be on the perf cluster the next time we enter 602 * high tput mode. However, the irq_balancer is free 603 * to move our interrupts to power cluster once 604 * blacklisting has been turned off in the "else" case. 605 */ 606 } else { 607 /* from TPUT_LO -> TPUT->HI */ 608 NAPI_DEBUG("%s: Moving to napi_tput_HI state", 609 __func__); 610 rc = hif_napi_cpu_migrate(napid, 611 HNC_ANY_CPU, 612 HNC_ACT_DISPERSE); 613 614 blacklist_pending = BLACKLIST_ON_PENDING; 615 } 616 napid->napi_mode = tput_mode; 617 break; 618 } 619 620 case NAPI_EVT_USR_SERIAL: { 621 unsigned long users = (unsigned long)data; 622 623 NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld", 624 __func__, users); 625 626 rc = hif_napi_cpu_migrate(napid, 627 HNC_ANY_CPU, 628 HNC_ACT_COLLAPSE); 629 if ((users == 0) && (rc == 0)) 630 blacklist_pending = BLACKLIST_ON_PENDING; 631 break; 632 } 633 case NAPI_EVT_USR_NORMAL: { 634 NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__); 635 /* 636 * Deserialization timeout is handled at hdd layer; 637 * just mark current mode to uninitialized to ensure 638 * it will be set when the delay is over 639 */ 640 napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED; 641 break; 642 } 643 default: { 644 HIF_ERROR("%s: unknown event: %d (data=0x%0lx)", 645 __func__, event, (unsigned long) data); 646 break; 647 } /* default */ 648 }; /* switch */ 649 650 651 switch (blacklist_pending) { 652 case BLACKLIST_ON_PENDING: 653 /* assume the control of WLAN IRQs */ 654 hif_napi_cpu_blacklist(napid, BLACKLIST_ON); 655 break; 656 case BLACKLIST_OFF_PENDING: 657 /* yield the control of WLAN IRQs */ 658 hif_napi_cpu_blacklist(napid, BLACKLIST_OFF); 659 break; 660 default: /* nothing to do */ 661 break; 662 } /* switch blacklist_pending */ 663 664 /* we want to perform the comparison in lock: 665 * there is a possiblity of hif_napi_event get called 666 * from two different contexts (driver unload and cpu hotplug 667 * notification) and napid->state get changed 668 * in driver unload context and can lead to race condition 669 * in cpu hotplug context. Therefore, perform the napid->state 670 * comparison before releasing lock. 671 */ 672 state_changed = (prev_state != napid->state); 673 qdf_spin_unlock_bh(&(napid->lock)); 674 675 if (state_changed) { 676 if (napid->state == ENABLE_NAPI_MASK) { 677 rc = 1; 678 for (i = 0; i < CE_COUNT_MAX; i++) { 679 struct qca_napi_info *napii = napid->napis[i]; 680 if (napii) { 681 napi = &(napii->napi); 682 NAPI_DEBUG("%s: enabling NAPI %d", 683 __func__, i); 684 napi_enable(napi); 685 } 686 } 687 } else { 688 rc = 0; 689 for (i = 0; i < CE_COUNT_MAX; i++) { 690 struct qca_napi_info *napii = napid->napis[i]; 691 if (napii) { 692 napi = &(napii->napi); 693 NAPI_DEBUG("%s: disabling NAPI %d", 694 __func__, i); 695 napi_disable(napi); 696 /* in case it is affined, remove it */ 697 qdf_dev_set_irq_affinity(napii->irq, 698 NULL); 699 } 700 } 701 } 702 } else { 703 HIF_DBG("%s: no change in hif napi state (still %d)", 704 __func__, prev_state); 705 } 706 707 NAPI_DEBUG("<--[rc=%d]", rc); 708 return rc; 709 } 710 qdf_export_symbol(hif_napi_event); 711 712 /** 713 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not 714 * @hif: hif context 715 * @ce : CE instance (or -1, to check if any CEs are enabled) 716 * 717 * Return: bool 718 */ 719 int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce) 720 { 721 int rc; 722 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 723 724 if (-1 == ce) 725 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK)); 726 else 727 rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) && 728 (hif->napi_data.ce_map & (0x01 << ce))); 729 return rc; 730 } 731 qdf_export_symbol(hif_napi_enabled); 732 733 /** 734 * hif_napi_created() - checks whether NAPI is created for given ce or not 735 * @hif: hif context 736 * @ce : CE instance 737 * 738 * Return: bool 739 */ 740 bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce) 741 { 742 int rc; 743 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 744 745 rc = (hif->napi_data.ce_map & (0x01 << ce)); 746 747 return !!rc; 748 } 749 qdf_export_symbol(hif_napi_created); 750 751 /** 752 * hif_napi_enable_irq() - enables bus interrupts after napi_complete 753 * 754 * @hif: hif context 755 * @id : id of NAPI instance calling this (used to determine the CE) 756 * 757 * Return: void 758 */ 759 inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id) 760 { 761 struct hif_softc *scn = HIF_GET_SOFTC(hif); 762 763 hif_irq_enable(scn, NAPI_ID2PIPE(id)); 764 } 765 766 767 /** 768 * hif_napi_schedule() - schedules napi, updates stats 769 * @scn: hif context 770 * @ce_id: index of napi instance 771 * 772 * Return: false if napi didn't enable or already scheduled, otherwise true 773 */ 774 bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id) 775 { 776 int cpu = smp_processor_id(); 777 struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx); 778 struct qca_napi_info *napii; 779 780 napii = scn->napi_data.napis[ce_id]; 781 if (qdf_unlikely(!napii)) { 782 HIF_ERROR("%s, scheduling unallocated napi (ce:%d)", 783 __func__, ce_id); 784 qdf_atomic_dec(&scn->active_tasklet_cnt); 785 return false; 786 } 787 788 if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) { 789 NAPI_DEBUG("napi scheduled, return"); 790 qdf_atomic_dec(&scn->active_tasklet_cnt); 791 return false; 792 } 793 794 hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE, 795 NULL, NULL, 0, 0); 796 napii->stats[cpu].napi_schedules++; 797 NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id); 798 napi_schedule(&(napii->napi)); 799 800 return true; 801 } 802 qdf_export_symbol(hif_napi_schedule); 803 804 /** 805 * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed 806 * @napi_info: pointer to qca_napi_info for the napi instance 807 * 808 * Return: true => interrupt already on correct cpu, no correction needed 809 * false => interrupt on wrong cpu, correction done for cpu affinity 810 * of the interrupt 811 */ 812 static inline 813 bool hif_napi_correct_cpu(struct qca_napi_info *napi_info) 814 { 815 bool right_cpu = true; 816 int rc = 0; 817 cpumask_t cpumask; 818 int cpu; 819 struct qca_napi_data *napid; 820 QDF_STATUS ret; 821 822 napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx)); 823 824 if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) { 825 826 cpu = qdf_get_cpu(); 827 if (unlikely((hif_napi_cpu_blacklist(napid, 828 BLACKLIST_QUERY) > 0) && 829 (cpu != napi_info->cpu))) { 830 right_cpu = false; 831 832 NAPI_DEBUG("interrupt on wrong CPU, correcting"); 833 cpumask.bits[0] = (0x01 << napi_info->cpu); 834 835 irq_modify_status(napi_info->irq, IRQ_NO_BALANCING, 0); 836 ret = qdf_dev_set_irq_affinity(napi_info->irq, 837 (struct qdf_cpu_mask *) 838 &cpumask); 839 rc = qdf_status_to_os_return(ret); 840 irq_modify_status(napi_info->irq, 0, IRQ_NO_BALANCING); 841 842 if (rc) 843 HIF_ERROR("error setting irq affinity hint: %d", 844 rc); 845 else 846 napi_info->stats[cpu].cpu_corrected++; 847 } 848 } 849 return right_cpu; 850 } 851 852 #ifdef RECEIVE_OFFLOAD 853 /** 854 * hif_napi_offld_flush_cb() - Call upper layer flush callback 855 * @napi_info: Handle to hif_napi_info 856 * 857 * Return: None 858 */ 859 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) 860 { 861 if (napi_info->offld_flush_cb) 862 napi_info->offld_flush_cb(napi_info); 863 } 864 #else 865 static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info) 866 { 867 } 868 #endif 869 870 /** 871 * hif_napi_poll() - NAPI poll routine 872 * @napi : pointer to NAPI struct as kernel holds it 873 * @budget: 874 * 875 * This is the body of the poll function. 876 * The poll function is called by kernel. So, there is a wrapper 877 * function in HDD, which in turn calls this function. 878 * Two main reasons why the whole thing is not implemented in HDD: 879 * a) references to things like ce_service that HDD is not aware of 880 * b) proximity to the implementation of ce_tasklet, which the body 881 * of this function should be very close to. 882 * 883 * NOTE TO THE MAINTAINER: 884 * Consider this function and ce_tasklet very tightly coupled pairs. 885 * Any changes to ce_tasklet or this function may likely need to be 886 * reflected in the counterpart. 887 * 888 * Returns: 889 * int: the amount of work done in this poll (<= budget) 890 */ 891 int hif_napi_poll(struct hif_opaque_softc *hif_ctx, 892 struct napi_struct *napi, 893 int budget) 894 { 895 int rc = 0; /* default: no work done, also takes care of error */ 896 int normalized = 0; 897 int bucket; 898 int cpu = smp_processor_id(); 899 bool poll_on_right_cpu; 900 struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx); 901 struct qca_napi_info *napi_info; 902 struct CE_state *ce_state = NULL; 903 904 if (unlikely(NULL == hif)) { 905 HIF_ERROR("%s: hif context is NULL", __func__); 906 QDF_ASSERT(0); 907 goto out; 908 } 909 910 napi_info = (struct qca_napi_info *) 911 container_of(napi, struct qca_napi_info, napi); 912 913 NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)", 914 __func__, napi_info->id, napi_info->irq, budget); 915 916 napi_info->stats[cpu].napi_polls++; 917 918 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), 919 NAPI_POLL_ENTER, NULL, NULL, cpu, 0); 920 921 rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id)); 922 NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs", 923 __func__, rc); 924 925 hif_napi_offld_flush_cb(napi_info); 926 927 /* do not return 0, if there was some work done, 928 * even if it is below the scale 929 */ 930 if (rc) { 931 napi_info->stats[cpu].napi_workdone += rc; 932 normalized = (rc / napi_info->scale); 933 if (normalized == 0) 934 normalized++; 935 bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS); 936 if (bucket >= QCA_NAPI_NUM_BUCKETS) { 937 bucket = QCA_NAPI_NUM_BUCKETS - 1; 938 HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)", 939 bucket, QCA_NAPI_NUM_BUCKETS); 940 } 941 napi_info->stats[cpu].napi_budget_uses[bucket]++; 942 } else { 943 /* if ce_per engine reports 0, then poll should be terminated */ 944 NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI", 945 __func__, __LINE__); 946 } 947 948 ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)]; 949 950 /* 951 * Not using the API hif_napi_correct_cpu directly in the if statement 952 * below since the API may not get evaluated if put at the end if any 953 * prior condition would evaluate to be true. The CPU correction 954 * check should kick in every poll. 955 */ 956 #ifdef NAPI_YIELD_BUDGET_BASED 957 if (ce_state && (ce_state->force_break || 0 == rc)) { 958 #else 959 poll_on_right_cpu = hif_napi_correct_cpu(napi_info); 960 if ((ce_state) && 961 (!ce_check_rx_pending(ce_state) || (0 == rc) || 962 !poll_on_right_cpu)) { 963 #endif 964 napi_info->stats[cpu].napi_completes++; 965 #ifdef NAPI_YIELD_BUDGET_BASED 966 ce_state->force_break = 0; 967 #endif 968 969 hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE, 970 NULL, NULL, 0, 0); 971 if (normalized >= budget) 972 normalized = budget - 1; 973 974 napi_complete(napi); 975 /* enable interrupts */ 976 hif_napi_enable_irq(hif_ctx, napi_info->id); 977 /* support suspend/resume */ 978 qdf_atomic_dec(&(hif->active_tasklet_cnt)); 979 980 NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts", 981 __func__, __LINE__); 982 } else { 983 /* 4.4 kernel NAPI implementation requires drivers to 984 * return full work when they ask to be re-scheduled, 985 * or napi_complete and re-start with a fresh interrupt 986 */ 987 normalized = budget; 988 } 989 990 hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id), 991 NAPI_POLL_EXIT, NULL, NULL, normalized, 0); 992 993 NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized); 994 return normalized; 995 out: 996 return rc; 997 } 998 qdf_export_symbol(hif_napi_poll); 999 1000 void hif_update_napi_max_poll_time(struct CE_state *ce_state, 1001 int ce_id, 1002 int cpu_id) 1003 { 1004 struct hif_softc *hif; 1005 struct qca_napi_info *napi_info; 1006 unsigned long long napi_poll_time = sched_clock() - 1007 ce_state->ce_service_start_time; 1008 1009 hif = ce_state->scn; 1010 napi_info = hif->napi_data.napis[ce_id]; 1011 if (napi_poll_time > 1012 napi_info->stats[cpu_id].napi_max_poll_time) 1013 napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time; 1014 } 1015 1016 #ifdef HIF_IRQ_AFFINITY 1017 /** 1018 * 1019 * hif_napi_update_yield_stats() - update NAPI yield related stats 1020 * @cpu_id: CPU ID for which stats needs to be updates 1021 * @ce_id: Copy Engine ID for which yield stats needs to be updates 1022 * @time_limit_reached: indicates whether the time limit was reached 1023 * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached 1024 * 1025 * Return: None 1026 */ 1027 void hif_napi_update_yield_stats(struct CE_state *ce_state, 1028 bool time_limit_reached, 1029 bool rxpkt_thresh_reached) 1030 { 1031 struct hif_softc *hif; 1032 struct qca_napi_data *napi_data = NULL; 1033 int ce_id = 0; 1034 int cpu_id = 0; 1035 1036 if (unlikely(NULL == ce_state)) { 1037 QDF_ASSERT(NULL != ce_state); 1038 return; 1039 } 1040 1041 hif = ce_state->scn; 1042 1043 if (unlikely(NULL == hif)) { 1044 QDF_ASSERT(NULL != hif); 1045 return; 1046 } 1047 napi_data = &(hif->napi_data); 1048 if (unlikely(NULL == napi_data)) { 1049 QDF_ASSERT(NULL != napi_data); 1050 return; 1051 } 1052 1053 ce_id = ce_state->id; 1054 cpu_id = qdf_get_cpu(); 1055 1056 if (unlikely(!napi_data->napis[ce_id])) { 1057 HIF_INFO("%s: NAPI info is NULL for ce id: %d", 1058 __func__, ce_id); 1059 return; 1060 } 1061 1062 if (time_limit_reached) 1063 napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++; 1064 else 1065 napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++; 1066 1067 hif_update_napi_max_poll_time(ce_state, ce_id, 1068 cpu_id); 1069 } 1070 1071 /** 1072 * 1073 * hif_napi_stats() - display NAPI CPU statistics 1074 * @napid: pointer to qca_napi_data 1075 * 1076 * Description: 1077 * Prints the various CPU cores on which the NAPI instances /CEs interrupts 1078 * are being executed. Can be called from outside NAPI layer. 1079 * 1080 * Return: None 1081 */ 1082 void hif_napi_stats(struct qca_napi_data *napid) 1083 { 1084 int i; 1085 struct qca_napi_cpu *cpu; 1086 1087 if (napid == NULL) { 1088 qdf_debug("%s: napiid struct is null", __func__); 1089 return; 1090 } 1091 1092 cpu = napid->napi_cpu; 1093 qdf_debug("NAPI CPU TABLE"); 1094 qdf_debug("lilclhead=%d, bigclhead=%d", 1095 napid->lilcl_head, napid->bigcl_head); 1096 for (i = 0; i < NR_CPUS; i++) { 1097 qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d", 1098 i, 1099 cpu[i].state, cpu[i].core_id, cpu[i].cluster_id, 1100 cpu[i].core_mask.bits[0], 1101 cpu[i].thread_mask.bits[0], 1102 cpu[i].max_freq, cpu[i].napis, 1103 cpu[i].cluster_nxt); 1104 } 1105 } 1106 1107 #ifdef FEATURE_NAPI_DEBUG 1108 /* 1109 * Local functions 1110 * - no argument checks, all internal/trusted callers 1111 */ 1112 static void hnc_dump_cpus(struct qca_napi_data *napid) 1113 { 1114 hif_napi_stats(napid); 1115 } 1116 #else 1117 static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }; 1118 #endif /* FEATURE_NAPI_DEBUG */ 1119 /** 1120 * hnc_link_clusters() - partitions to cpu table into clusters 1121 * @napid: pointer to NAPI data 1122 * 1123 * Takes in a CPU topology table and builds two linked lists 1124 * (big cluster cores, list-head at bigcl_head, and little cluster 1125 * cores, list-head at lilcl_head) out of it. 1126 * 1127 * If there are more than two clusters: 1128 * - bigcl_head and lilcl_head will be different, 1129 * - the cluster with highest cpufreq will be considered the "big" cluster. 1130 * If there are more than one with the highest frequency, the *last* of such 1131 * clusters will be designated as the "big cluster" 1132 * - the cluster with lowest cpufreq will be considered the "li'l" cluster. 1133 * If there are more than one clusters with the lowest cpu freq, the *first* 1134 * of such clusters will be designated as the "little cluster" 1135 * - We only support up to 32 clusters 1136 * Return: 0 : OK 1137 * !0: error (at least one of lil/big clusters could not be found) 1138 */ 1139 #define HNC_MIN_CLUSTER 0 1140 #define HNC_MAX_CLUSTER 1 1141 static int hnc_link_clusters(struct qca_napi_data *napid) 1142 { 1143 int rc = 0; 1144 1145 int i; 1146 int it = 0; 1147 uint32_t cl_done = 0x0; 1148 int cl, curcl, curclhead = 0; 1149 int more; 1150 unsigned int lilfrq = INT_MAX; 1151 unsigned int bigfrq = 0; 1152 unsigned int clfrq = 0; 1153 int prev = 0; 1154 struct qca_napi_cpu *cpus = napid->napi_cpu; 1155 1156 napid->lilcl_head = napid->bigcl_head = -1; 1157 1158 do { 1159 more = 0; 1160 it++; curcl = -1; 1161 for (i = 0; i < NR_CPUS; i++) { 1162 cl = cpus[i].cluster_id; 1163 NAPI_DEBUG("Processing cpu[%d], cluster=%d\n", 1164 i, cl); 1165 if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) { 1166 NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl); 1167 /* continue if ASSERTs are disabled */ 1168 continue; 1169 }; 1170 if (cpumask_weight(&(cpus[i].core_mask)) == 0) { 1171 NAPI_DEBUG("Core mask 0. SKIPPED\n"); 1172 continue; 1173 } 1174 if (cl_done & (0x01 << cl)) { 1175 NAPI_DEBUG("Cluster already processed. SKIPPED\n"); 1176 continue; 1177 } else { 1178 if (more == 0) { 1179 more = 1; 1180 curcl = cl; 1181 curclhead = i; /* row */ 1182 clfrq = cpus[i].max_freq; 1183 prev = -1; 1184 }; 1185 if ((curcl >= 0) && (curcl != cl)) { 1186 NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n", 1187 cl, curcl); 1188 continue; 1189 } 1190 if (cpus[i].max_freq != clfrq) 1191 NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n", 1192 cpus[i].max_freq, clfrq); 1193 if (clfrq >= bigfrq) { 1194 bigfrq = clfrq; 1195 napid->bigcl_head = curclhead; 1196 NAPI_DEBUG("bigcl=%d\n", curclhead); 1197 } 1198 if (clfrq < lilfrq) { 1199 lilfrq = clfrq; 1200 napid->lilcl_head = curclhead; 1201 NAPI_DEBUG("lilcl=%d\n", curclhead); 1202 } 1203 if (prev != -1) 1204 cpus[prev].cluster_nxt = i; 1205 1206 prev = i; 1207 } 1208 } 1209 if (curcl >= 0) 1210 cl_done |= (0x01 << curcl); 1211 1212 } while (more); 1213 1214 if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0))) 1215 rc = -EFAULT; 1216 1217 hnc_dump_cpus(napid); /* if NAPI_DEBUG */ 1218 return rc; 1219 } 1220 #undef HNC_MIN_CLUSTER 1221 #undef HNC_MAX_CLUSTER 1222 1223 /* 1224 * hotplug function group 1225 */ 1226 1227 /** 1228 * hnc_cpu_online_cb() - handles CPU hotplug "up" events 1229 * @context: the associated HIF context 1230 * @cpu: the CPU Id of the CPU the event happened on 1231 * 1232 * Return: None 1233 */ 1234 static void hnc_cpu_online_cb(void *context, uint32_t cpu) 1235 { 1236 struct hif_softc *hif = context; 1237 struct qca_napi_data *napid = &hif->napi_data; 1238 1239 if (cpu >= NR_CPUS) 1240 return; 1241 1242 NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu); 1243 1244 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP; 1245 NAPI_DEBUG("%s: CPU %u marked %d", 1246 __func__, cpu, napid->napi_cpu[cpu].state); 1247 1248 NAPI_DEBUG("<--%s", __func__); 1249 } 1250 1251 /** 1252 * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events 1253 * @context: the associated HIF context 1254 * @cpu: the CPU Id of the CPU the event happened on 1255 * 1256 * On transtion to offline, we act on PREP events, because we may need to move 1257 * the irqs/NAPIs to another CPU before it is actually off-lined. 1258 * 1259 * Return: None 1260 */ 1261 static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu) 1262 { 1263 struct hif_softc *hif = context; 1264 struct qca_napi_data *napid = &hif->napi_data; 1265 1266 if (cpu >= NR_CPUS) 1267 return; 1268 1269 NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu); 1270 1271 napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN; 1272 1273 NAPI_DEBUG("%s: CPU %u marked %d; updating affinity", 1274 __func__, cpu, napid->napi_cpu[cpu].state); 1275 1276 /** 1277 * we need to move any NAPIs on this CPU out. 1278 * if we are in LO throughput mode, then this is valid 1279 * if the CPU is the the low designated CPU. 1280 */ 1281 hif_napi_event(GET_HIF_OPAQUE_HDL(hif), 1282 NAPI_EVT_CPU_STATE, 1283 (void *) 1284 ((size_t)cpu << 16 | napid->napi_cpu[cpu].state)); 1285 1286 NAPI_DEBUG("<--%s", __func__); 1287 } 1288 1289 static int hnc_hotplug_register(struct hif_softc *hif_sc) 1290 { 1291 QDF_STATUS status; 1292 1293 NAPI_DEBUG("-->%s", __func__); 1294 1295 status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler, 1296 hif_sc, 1297 hnc_cpu_online_cb, 1298 hnc_cpu_before_offline_cb); 1299 1300 NAPI_DEBUG("<--%s [%d]", __func__, status); 1301 1302 return qdf_status_to_os_return(status); 1303 } 1304 1305 static void hnc_hotplug_unregister(struct hif_softc *hif_sc) 1306 { 1307 NAPI_DEBUG("-->%s", __func__); 1308 1309 if (hif_sc->napi_data.cpuhp_handler) 1310 qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler); 1311 1312 NAPI_DEBUG("<--%s", __func__); 1313 } 1314 1315 /** 1316 * hnc_install_tput() - installs a callback in the throughput detector 1317 * @register: !0 => register; =0: unregister 1318 * 1319 * installs a callback to be called when wifi driver throughput (tx+rx) 1320 * crosses a threshold. Currently, we are using the same criteria as 1321 * TCP ack suppression (500 packets/100ms by default). 1322 * 1323 * Return: 0 : success 1324 * <0: failure 1325 */ 1326 1327 static int hnc_tput_hook(int install) 1328 { 1329 int rc = 0; 1330 1331 /* 1332 * Nothing, until the bw_calculation accepts registration 1333 * it is now hardcoded in the wlan_hdd_main.c::hdd_bus_bw_compute_cbk 1334 * hdd_napi_throughput_policy(...) 1335 */ 1336 return rc; 1337 } 1338 1339 /* 1340 * Implementation of hif_napi_cpu API 1341 */ 1342 1343 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) 1344 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) 1345 { 1346 cpumask_copy(&(cpus[i].thread_mask), 1347 topology_sibling_cpumask(i)); 1348 } 1349 #else 1350 static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i) 1351 { 1352 } 1353 #endif 1354 1355 1356 /** 1357 * hif_napi_cpu_init() - initialization of irq affinity block 1358 * @ctx: pointer to qca_napi_data 1359 * 1360 * called by hif_napi_create, after the first instance is called 1361 * - builds napi_rss_cpus table from cpu topology 1362 * - links cores of the same clusters together 1363 * - installs hot-plug notifier 1364 * - installs throughput trigger notifier (when such mechanism exists) 1365 * 1366 * Return: 0: OK 1367 * <0: error code 1368 */ 1369 int hif_napi_cpu_init(struct hif_opaque_softc *hif) 1370 { 1371 int rc = 0; 1372 int i; 1373 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; 1374 struct qca_napi_cpu *cpus = napid->napi_cpu; 1375 1376 NAPI_DEBUG("--> "); 1377 1378 if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) { 1379 NAPI_DEBUG("NAPI RSS table already initialized.\n"); 1380 rc = -EALREADY; 1381 goto lab_rss_init; 1382 } 1383 1384 /* build CPU topology table */ 1385 for_each_possible_cpu(i) { 1386 cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask) 1387 ? QCA_NAPI_CPU_UP 1388 : QCA_NAPI_CPU_DOWN)); 1389 cpus[i].core_id = topology_core_id(i); 1390 cpus[i].cluster_id = topology_physical_package_id(i); 1391 cpumask_copy(&(cpus[i].core_mask), 1392 topology_core_cpumask(i)); 1393 record_sibling_cpumask(cpus, i); 1394 cpus[i].max_freq = cpufreq_quick_get_max(i); 1395 cpus[i].napis = 0x0; 1396 cpus[i].cluster_nxt = -1; /* invalid */ 1397 } 1398 1399 /* link clusters together */ 1400 rc = hnc_link_clusters(napid); 1401 if (0 != rc) 1402 goto lab_err_topology; 1403 1404 /* install hotplug notifier */ 1405 rc = hnc_hotplug_register(HIF_GET_SOFTC(hif)); 1406 if (0 != rc) 1407 goto lab_err_hotplug; 1408 1409 /* install throughput notifier */ 1410 rc = hnc_tput_hook(1); 1411 if (0 == rc) 1412 goto lab_rss_init; 1413 1414 lab_err_hotplug: 1415 hnc_tput_hook(0); 1416 hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); 1417 lab_err_topology: 1418 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); 1419 lab_rss_init: 1420 NAPI_DEBUG("<-- [rc=%d]", rc); 1421 return rc; 1422 } 1423 1424 /** 1425 * hif_napi_cpu_deinit() - clean-up of irq affinity block 1426 * 1427 * called by hif_napi_destroy, when the last instance is removed 1428 * - uninstalls throughput and hotplug notifiers 1429 * - clears cpu topology table 1430 * Return: 0: OK 1431 */ 1432 int hif_napi_cpu_deinit(struct hif_opaque_softc *hif) 1433 { 1434 int rc = 0; 1435 struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data; 1436 1437 NAPI_DEBUG("-->%s(...)", __func__); 1438 1439 /* uninstall tput notifier */ 1440 rc = hnc_tput_hook(0); 1441 1442 /* uninstall hotplug notifier */ 1443 hnc_hotplug_unregister(HIF_GET_SOFTC(hif)); 1444 1445 /* clear the topology table */ 1446 memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS); 1447 1448 NAPI_DEBUG("<--%s[rc=%d]", __func__, rc); 1449 1450 return rc; 1451 } 1452 1453 /** 1454 * hncm_migrate_to() - migrates a NAPI to a CPU 1455 * @napid: pointer to NAPI block 1456 * @ce_id: CE_id of the NAPI instance 1457 * @didx : index in the CPU topology table for the CPU to migrate to 1458 * 1459 * Migrates NAPI (identified by the CE_id) to the destination core 1460 * Updates the napi_map of the destination entry 1461 * 1462 * Return: 1463 * =0 : success 1464 * <0 : error 1465 */ 1466 static int hncm_migrate_to(struct qca_napi_data *napid, 1467 int napi_ce, 1468 int didx) 1469 { 1470 int rc = 0; 1471 cpumask_t cpumask; 1472 QDF_STATUS status; 1473 1474 NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx); 1475 1476 cpumask.bits[0] = (1 << didx); 1477 if (!napid->napis[napi_ce]) 1478 return -EINVAL; 1479 1480 irq_modify_status(napid->napis[napi_ce]->irq, IRQ_NO_BALANCING, 0); 1481 status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq, 1482 (struct qdf_cpu_mask *)&cpumask); 1483 rc = qdf_status_to_os_return(status); 1484 1485 /* unmark the napis bitmap in the cpu table */ 1486 napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce); 1487 /* mark the napis bitmap for the new designated cpu */ 1488 napid->napi_cpu[didx].napis |= (0x01 << napi_ce); 1489 napid->napis[napi_ce]->cpu = didx; 1490 1491 NAPI_DEBUG("<--%s[%d]", __func__, rc); 1492 return rc; 1493 } 1494 /** 1495 * hncm_dest_cpu() - finds a destination CPU for NAPI 1496 * @napid: pointer to NAPI block 1497 * @act : RELOCATE | COLLAPSE | DISPERSE 1498 * 1499 * Finds the designated destionation for the next IRQ. 1500 * RELOCATE: translated to either COLLAPSE or DISPERSE based 1501 * on napid->napi_mode (throughput state) 1502 * COLLAPSE: All have the same destination: the first online CPU in lilcl 1503 * DISPERSE: One of the CPU in bigcl, which has the smallest number of 1504 * NAPIs on it 1505 * 1506 * Return: >=0 : index in the cpu topology table 1507 * : < 0 : error 1508 */ 1509 static int hncm_dest_cpu(struct qca_napi_data *napid, int act) 1510 { 1511 int destidx = -1; 1512 int head, i; 1513 1514 NAPI_DEBUG("-->%s(act=%d)", __func__, act); 1515 if (act == HNC_ACT_RELOCATE) { 1516 if (napid->napi_mode == QCA_NAPI_TPUT_LO) 1517 act = HNC_ACT_COLLAPSE; 1518 else 1519 act = HNC_ACT_DISPERSE; 1520 NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d", 1521 __func__, act); 1522 } 1523 if (act == HNC_ACT_COLLAPSE) { 1524 head = i = napid->lilcl_head; 1525 retry_collapse: 1526 while (i >= 0) { 1527 if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) { 1528 destidx = i; 1529 break; 1530 } 1531 i = napid->napi_cpu[i].cluster_nxt; 1532 } 1533 if ((destidx < 0) && (head == napid->lilcl_head)) { 1534 NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl", 1535 __func__); 1536 head = i = napid->bigcl_head; 1537 goto retry_collapse; 1538 } 1539 } else { /* HNC_ACT_DISPERSE */ 1540 int smallest = 99; /* all 32 bits full */ 1541 int smallidx = -1; 1542 1543 head = i = napid->bigcl_head; 1544 retry_disperse: 1545 while (i >= 0) { 1546 if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) && 1547 (hweight32(napid->napi_cpu[i].napis) <= smallest)) { 1548 smallest = napid->napi_cpu[i].napis; 1549 smallidx = i; 1550 } 1551 i = napid->napi_cpu[i].cluster_nxt; 1552 } 1553 /* Check if matches with user sepecified CPU mask */ 1554 smallidx = ((1 << smallidx) & napid->user_cpu_affin_mask) ? 1555 smallidx : -1; 1556 1557 if ((smallidx < 0) && (head == napid->bigcl_head)) { 1558 NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl", 1559 __func__); 1560 head = i = napid->lilcl_head; 1561 goto retry_disperse; 1562 } 1563 destidx = smallidx; 1564 } 1565 NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx); 1566 return destidx; 1567 } 1568 /** 1569 * hif_napi_cpu_migrate() - migrate IRQs away 1570 * @cpu: -1: all CPUs <n> specific CPU 1571 * @act: COLLAPSE | DISPERSE 1572 * 1573 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible 1574 * cores. Eligible cores are: 1575 * act=COLLAPSE -> the first online core of the little cluster 1576 * act=DISPERSE -> separate cores of the big cluster, so that each core will 1577 * host minimum number of NAPIs/IRQs (napid->cpus[cpu].napis) 1578 * 1579 * Note that this function is called with a spinlock acquired already. 1580 * 1581 * Return: =0: success 1582 * <0: error 1583 */ 1584 1585 int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action) 1586 { 1587 int rc = 0; 1588 struct qca_napi_cpu *cpup; 1589 int i, dind; 1590 uint32_t napis; 1591 1592 NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)", 1593 __func__, cpu, action); 1594 /* the following is really: hif_napi_enabled() with less overhead */ 1595 if (napid->ce_map == 0) { 1596 NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__); 1597 goto hncm_return; 1598 } 1599 1600 cpup = napid->napi_cpu; 1601 1602 switch (action) { 1603 case HNC_ACT_RELOCATE: 1604 case HNC_ACT_DISPERSE: 1605 case HNC_ACT_COLLAPSE: { 1606 /* first find the src napi set */ 1607 if (cpu == HNC_ANY_CPU) 1608 napis = napid->ce_map; 1609 else 1610 napis = cpup[cpu].napis; 1611 /* then clear the napi bitmap on each CPU */ 1612 for (i = 0; i < NR_CPUS; i++) 1613 cpup[i].napis = 0; 1614 /* then for each of the NAPIs to disperse: */ 1615 for (i = 0; i < CE_COUNT_MAX; i++) 1616 if (napis & (1 << i)) { 1617 /* find a destination CPU */ 1618 dind = hncm_dest_cpu(napid, action); 1619 if (dind >= 0) { 1620 NAPI_DEBUG("Migrating NAPI ce%d to %d", 1621 i, dind); 1622 rc = hncm_migrate_to(napid, i, dind); 1623 } else { 1624 NAPI_DEBUG("No dest for NAPI ce%d", i); 1625 hnc_dump_cpus(napid); 1626 rc = -1; 1627 } 1628 } 1629 break; 1630 } 1631 default: { 1632 NAPI_DEBUG("%s: bad action: %d\n", __func__, action); 1633 QDF_BUG(0); 1634 break; 1635 } 1636 } /* switch action */ 1637 1638 hncm_return: 1639 hnc_dump_cpus(napid); 1640 return rc; 1641 } 1642 1643 1644 /** 1645 * hif_napi_bl_irq() - calls irq_modify_status to enable/disable blacklisting 1646 * @napid: pointer to qca_napi_data structure 1647 * @bl_flag: blacklist flag to enable/disable blacklisting 1648 * 1649 * The function enables/disables blacklisting for all the copy engine 1650 * interrupts on which NAPI is enabled. 1651 * 1652 * Return: None 1653 */ 1654 static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag) 1655 { 1656 int i; 1657 struct qca_napi_info *napii; 1658 1659 for (i = 0; i < CE_COUNT_MAX; i++) { 1660 /* check if NAPI is enabled on the CE */ 1661 if (!(napid->ce_map & (0x01 << i))) 1662 continue; 1663 1664 /*double check that NAPI is allocated for the CE */ 1665 napii = napid->napis[i]; 1666 if (!(napii)) 1667 continue; 1668 1669 if (bl_flag == true) 1670 irq_modify_status(napii->irq, 1671 0, IRQ_NO_BALANCING); 1672 else 1673 irq_modify_status(napii->irq, 1674 IRQ_NO_BALANCING, 0); 1675 HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i); 1676 } 1677 } 1678 1679 #ifdef CONFIG_SCHED_CORE_CTL 1680 /* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */ 1681 static inline int hif_napi_core_ctl_set_boost(bool boost) 1682 { 1683 return core_ctl_set_boost(boost); 1684 } 1685 #else 1686 static inline int hif_napi_core_ctl_set_boost(bool boost) 1687 { 1688 return 0; 1689 } 1690 #endif 1691 /** 1692 * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts. 1693 * @napid: pointer to qca_napi_data structure 1694 * @op: blacklist operation to perform 1695 * 1696 * The function enables/disables/queries blacklisting for all CE RX 1697 * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables 1698 * core_ctl_set_boost. 1699 * Once blacklisting is enabled, the interrupts will not be managed by the IRQ 1700 * balancer. 1701 * 1702 * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST is not enabled 1703 * for BLACKLIST_QUERY op - blacklist refcount 1704 * for BLACKLIST_ON op - return value from core_ctl_set_boost API 1705 * for BLACKLIST_OFF op - return value from core_ctl_set_boost API 1706 */ 1707 int hif_napi_cpu_blacklist(struct qca_napi_data *napid, 1708 enum qca_blacklist_op op) 1709 { 1710 int rc = 0; 1711 static int ref_count; /* = 0 by the compiler */ 1712 uint8_t flags = napid->flags; 1713 bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING; 1714 bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST; 1715 1716 NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op); 1717 1718 if (!(bl_en && ccb_en)) { 1719 rc = -EINVAL; 1720 goto out; 1721 } 1722 1723 switch (op) { 1724 case BLACKLIST_QUERY: 1725 rc = ref_count; 1726 break; 1727 case BLACKLIST_ON: 1728 ref_count++; 1729 rc = 0; 1730 if (ref_count == 1) { 1731 rc = hif_napi_core_ctl_set_boost(true); 1732 NAPI_DEBUG("boost_on() returns %d - refcnt=%d", 1733 rc, ref_count); 1734 hif_napi_bl_irq(napid, true); 1735 } 1736 break; 1737 case BLACKLIST_OFF: 1738 if (ref_count) { 1739 ref_count--; 1740 rc = 0; 1741 if (ref_count == 0) { 1742 rc = hif_napi_core_ctl_set_boost(false); 1743 NAPI_DEBUG("boost_off() returns %d - refcnt=%d", 1744 rc, ref_count); 1745 hif_napi_bl_irq(napid, false); 1746 } 1747 } 1748 break; 1749 default: 1750 NAPI_DEBUG("Invalid blacklist op: %d", op); 1751 rc = -EINVAL; 1752 } /* switch */ 1753 out: 1754 NAPI_DEBUG("<--%s[%d]", __func__, rc); 1755 return rc; 1756 } 1757 1758 /** 1759 * hif_napi_serialize() - [de-]serialize NAPI operations 1760 * @hif: context 1761 * @is_on: 1: serialize, 0: deserialize 1762 * 1763 * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the 1764 * following steps (see hif_napi_event for code): 1765 * - put irqs of all NAPI instances on the same CPU 1766 * - only for the first serialize call: blacklist 1767 * 1768 * hif_napi_serialize(hif, 0): 1769 * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec) 1770 * - at the end of the timer, check the current throughput state and 1771 * implement it. 1772 */ 1773 static unsigned long napi_serialize_reqs; 1774 int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on) 1775 { 1776 int rc = -EINVAL; 1777 1778 if (hif != NULL) 1779 switch (is_on) { 1780 case 0: { /* de-serialize */ 1781 rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL, 1782 (void *) 0); 1783 napi_serialize_reqs = 0; 1784 break; 1785 } /* end de-serialize */ 1786 case 1: { /* serialize */ 1787 rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL, 1788 (void *)napi_serialize_reqs++); 1789 break; 1790 } /* end serialize */ 1791 default: 1792 break; /* no-op */ 1793 } /* switch */ 1794 return rc; 1795 } 1796 1797 #endif /* ifdef HIF_IRQ_AFFINITY */ 1798