/*
 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: hif_irq_affinity.c
 *
 * This irq affinity implementation is os dependent, so it can be treated as
 * an abstraction layer... Should this be moved into a /linux folder?
 */

#include <linux/string.h> /* memset */

/* Linux headers */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_SCHED_CORE_CTL
#include <linux/sched/core_ctl.h>
#endif
#include <linux/pm.h>
#include <hif_napi.h>
#include <hif_irq_affinity.h>
#include <hif_exec.h>
#include <hif_main.h>

#if defined(FEATURE_NAPI_DEBUG) && defined(HIF_IRQ_AFFINITY)
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	hif_napi_stats(napid);
}
#else
static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }
#endif /* FEATURE_NAPI_DEBUG && HIF_IRQ_AFFINITY */

#ifdef HIF_IRQ_AFFINITY
/**
 * hif_exec_event() - reacts to events that impact irq affinity
 * @hif_ctx: pointer to hif context
 * @event: event that has been detected
 * @data: more data regarding the event
 *
 * Description:
 * This function handles two types of events:
 * 1- Events that change the state of NAPI (enabled/disabled):
 *    {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *    The state is retrievable by "hdd_napi_enabled(-1)"
 *    - NAPI will be on if either the INI file is on and it has not been
 *      disabled by a subsequent vendor CMD, or it has been enabled by a
 *      vendor CMD.
 * 2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *    {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *    - In LO tput mode, NAPI will yield control of its interrupts to the
 *      system management functions. However, in HI throughput mode, NAPI
 *      will actively manage its interrupts/instances (by trying to disperse
 *      them out to separate performance cores).
 *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 * + In some cases (roaming peer management is the only case so far), a
 *   client can trigger a "SERIALIZE" event. Basically, this means that the
 *   user is asking NAPI to go into a truly single execution context state.
 *   So, NAPI indicates to msm-irqbalancer that it wants to be blacklisted
 *   (if called for the first time) and then moves all IRQs (for NAPI
 *   instances) to be collapsed to a single core, as sketched below.
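 *
 *   A minimal sketch of that sequence (hypothetical caller; assumes a valid
 *   hif_ctx handle; a user count of 0 requests full serialization):
 *	hif_exec_event(hif_ctx, NAPI_EVT_USR_SERIAL, (void *)0);
 *	... single-context processing, e.g. roaming peer management ...
 *	hif_exec_event(hif_ctx, NAPI_EVT_USR_NORMAL, (void *)0);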
 *   If called multiple times, it will just re-collapse the CPUs. This is
 *   because the blacklist-on() API is reference-counted and has already
 *   been called.
 *
 *   Such a user should send the "DESERIALIZE" (NORMAL) event to return NAPI
 *   to its "normal" operation. Optionally, they can give a timeout value (in
 *   multiples of BusBandwidthCheckPeriod -- 100 msecs by default). In this
 *   case, NAPI will just set the current throughput state to uninitialized
 *   and set the delay period. When the policy handler is next called, it
 *   skips applying the policy for that many periods, and applies it
 *   normally afterwards.
 *
 * Return:
 *  < 0: some error
 *  = 0: event handled successfully
 */
int hif_exec_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	uint32_t prev_state;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	enum {
		BLACKLIST_NOT_PENDING,
		BLACKLIST_ON_PENDING,
		BLACKLIST_OFF_PENDING
	} blacklist_pending = BLACKLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE:
		/* deprecated */
		break;

	case NAPI_EVT_CPU_STATE: {
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_exec_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			blacklist_pending = BLACKLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to the power cluster once
			 * blacklisting has been turned off in the "else" case.
			 */
		} else {
			/* from TPUT_LO -> TPUT_HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_exec_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			blacklist_pending = BLACKLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		rc = hif_exec_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		if ((users == 0) && (rc == 0))
			blacklist_pending = BLACKLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		if (!napid->user_cpu_affin_mask)
			blacklist_pending = BLACKLIST_OFF_PENDING;
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark the current mode as uninitialized to ensure
		 * it will be re-set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long)data);
		break;
	} /* default */
	} /* switch */

	switch (blacklist_pending) {
	case BLACKLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_ON);
		break;
	case BLACKLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_blacklist(napid, BLACKLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch blacklist_pending */

	qdf_spin_unlock_bh(&(napid->lock));

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}

#endif /* HIF_IRQ_AFFINITY */

/**
 * hncm_exec_migrate_to() - migrates a NAPI to a CPU
 * @napid: pointer to NAPI block
 * @ctx_id: CE id of the NAPI instance
 * @didx: index in the CPU topology table for the CPU to migrate to
 *
 * Migrates NAPI (identified by the CE id) to the destination core.
 * Updates the napi_map of the destination entry.
 *
 * Return:
 *  =0 : success
 *  <0 : error
 */
static int hncm_exec_migrate_to(struct qca_napi_data *napid, uint8_t ctx_id,
				int didx)
{
	struct hif_exec_context *exec_ctx;
	int rc = 0;
	int status = 0;
	int ind;

	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, ctx_id, didx);

	exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, ctx_id);
	if (!exec_ctx)
		return -EINVAL;

	exec_ctx->cpumask.bits[0] = (1 << didx);

	for (ind = 0; ind < exec_ctx->numirq; ind++) {
		if (exec_ctx->os_irq[ind]) {
			irq_modify_status(exec_ctx->os_irq[ind],
					  IRQ_NO_BALANCING, 0);
			rc = irq_set_affinity_hint(exec_ctx->os_irq[ind],
						   &exec_ctx->cpumask);
			if (rc)
				status = rc;
		}
	}

	/* unmark the napis bitmap in the cpu table */
	napid->napi_cpu[exec_ctx->cpu].napis &= ~(0x01 << ctx_id);
	/* mark the napis bitmap for the new designated cpu */
	napid->napi_cpu[didx].napis |= (0x01 << ctx_id);
	exec_ctx->cpu = didx;

	NAPI_DEBUG("<--%s[%d]", __func__, status);
	return status;
}

/**
 * hncm_dest_cpu() - finds a destination CPU for NAPI
 * @napid: pointer to NAPI block
 * @act: RELOCATE | COLLAPSE | DISPERSE
 *
 * Finds the designated destination for the next IRQ.
 * RELOCATE: translated to either COLLAPSE or DISPERSE based
 *           on napid->napi_mode (throughput state)
 * COLLAPSE: all NAPIs get the same destination: the first online CPU in lilcl
 * DISPERSE: one of the CPUs in bigcl, the one with the smallest number of
 *           NAPIs on it
 *
 * Return: >=0 : index in the cpu topology table
 *          <0 : error
 */
static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
{
	int destidx = -1;
	int head, i;

	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
	if (act == HNC_ACT_RELOCATE) {
		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
			act = HNC_ACT_COLLAPSE;
		else
			act = HNC_ACT_DISPERSE;
		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
			   __func__, act);
	}
	if (act == HNC_ACT_COLLAPSE) {
		head = i = napid->lilcl_head;
retry_collapse:
		while (i >= 0) {
			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
				destidx = i;
				break;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		if ((destidx < 0) && (head == napid->lilcl_head)) {
			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
				   __func__);
			head = i = napid->bigcl_head;
			goto retry_collapse;
		}
	} else { /* HNC_ACT_DISPERSE */
		int smallest = 99; /* > max possible hweight32() of a map */
		int smallidx = -1;

		head = i = napid->bigcl_head;
retry_disperse:
		while (i >= 0) {
			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
			    (hweight32(napid->napi_cpu[i].napis) <=
			     smallest)) {
				/* track the NAPI count, not the raw bitmap */
				smallest = hweight32(napid->napi_cpu[i].napis);
				smallidx = i;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		destidx = smallidx;
		if ((destidx < 0) && (head == napid->bigcl_head)) {
			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
				   __func__);
			head = i = napid->lilcl_head;
			goto retry_disperse;
		}
	}
	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
	return destidx;
}

/**
 * hif_exec_cpu_migrate() - migrate IRQs away
 * @napid: pointer to qca_napi_data structure
 * @cpu: -1: all CPUs, <n>: specific CPU
 * @action: COLLAPSE | DISPERSE
 *
 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
 * cores. Eligible cores are:
 * action=COLLAPSE -> the first online core of the little cluster
 * action=DISPERSE -> separate cores of the big cluster, so that each core
 *                    will host a minimum number of NAPIs/IRQs
 *                    (napid->napi_cpu[cpu].napis)
 *
 * Note that this function is called with a spinlock acquired already.
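 *
 * For example, the NAPI_EVT_TPUT_STATE (HI) transition above issues, with
 * napid->lock already held:
 *	hif_exec_cpu_migrate(napid, HNC_ANY_CPU, HNC_ACT_DISPERSE);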
 *
 * Return: =0: success
 *         <0: error
 */
int hif_exec_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
{
	int rc = 0;
	struct qca_napi_cpu *cpup;
	int i, dind;
	uint32_t napis;

	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
		   __func__, cpu, action);

	if (napid->exec_map == 0) {
		NAPI_DEBUG("%s: no datapath contexts to disperse", __func__);
		goto hncm_return;
	}
	cpup = napid->napi_cpu;

	switch (action) {
	case HNC_ACT_RELOCATE:
	case HNC_ACT_DISPERSE:
	case HNC_ACT_COLLAPSE: {
		/* first find the src napi set */
		if (cpu == HNC_ANY_CPU)
			napis = napid->exec_map;
		else
			napis = cpup[cpu].napis;
		/* then clear the napi bitmap on each CPU */
		for (i = 0; i < NR_CPUS; i++)
			cpup[i].napis = 0;
		/* then for each of the NAPIs to disperse: */
		for (i = 0; i < HIF_MAX_GROUP; i++)
			if (napis & (1 << i)) {
				/* find a destination CPU */
				dind = hncm_dest_cpu(napid, action);
				if (dind >= 0) {
					rc = hncm_exec_migrate_to(napid, i,
								  dind);
				} else {
					NAPI_DEBUG("No dest for NAPI ce%d", i);
					hnc_dump_cpus(napid);
					rc = -1;
				}
			}
		break;
	}
	default: {
		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
		QDF_BUG(0);
		break;
	}
	} /* switch action */

hncm_return:
	hnc_dump_cpus(napid);
	return rc;
}

/**
 * hif_exec_bl_irq() - calls irq_modify_status to enable/disable blacklisting
 * @napid: pointer to qca_napi_data structure
 * @bl_flag: blacklist flag to enable/disable blacklisting
 *
 * The function enables/disables blacklisting for all the copy engine
 * interrupts on which NAPI is enabled.
 *
 * Return: None
 */
static inline void hif_exec_bl_irq(struct qca_napi_data *napid, bool bl_flag)
{
	int i, j;
	struct hif_exec_context *exec_ctx;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		/* check if NAPI is enabled on the CE */
		if (!(napid->exec_map & (0x01 << i)))
			continue;

		/* double check that NAPI is allocated for the CE */
		exec_ctx = hif_exec_get_ctx(&napid->hif_softc->osc, i);
		if (!(exec_ctx))
			continue;

		if (bl_flag == true)
			for (j = 0; j < exec_ctx->numirq; j++)
				irq_modify_status(exec_ctx->os_irq[j],
						  0, IRQ_NO_BALANCING);
		else
			for (j = 0; j < exec_ctx->numirq; j++)
				irq_modify_status(exec_ctx->os_irq[j],
						  IRQ_NO_BALANCING, 0);
		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
	}
}

#ifdef CONFIG_SCHED_CORE_CTL
/* Enable this API only if kernel feature CONFIG_SCHED_CORE_CTL is defined */
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return core_ctl_set_boost(boost);
}
#else
static inline int hif_napi_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif

/**
 * hif_exec_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
 * @napid: pointer to qca_napi_data structure
 * @op: blacklist operation to perform
 *
 * The function enables/disables/queries blacklisting for all CE RX
 * interrupts with NAPI enabled. Besides blacklisting, it also enables/disables
 * core_ctl_set_boost.
 * Once blacklisting is enabled, the interrupts will not be managed by the IRQ
 * balancer.
 *
 * Return: -EINVAL, in case IRQ_BLACKLISTING and CORE_CTL_BOOST are not
 *         enabled
 *         for BLACKLIST_QUERY op - blacklist refcount
 *         for BLACKLIST_ON op - return value from core_ctl_set_boost API
 *         for BLACKLIST_OFF op - return value from core_ctl_set_boost API
 */
int hif_exec_cpu_blacklist(struct qca_napi_data *napid,
			   enum qca_blacklist_op op)
{
	int rc = 0;
	static int ref_count; /* = 0 by the compiler */
	uint8_t flags = napid->flags;
	bool bl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;

	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);

	if (!(bl_en && ccb_en)) {
		rc = -EINVAL;
		goto out;
	}

	switch (op) {
	case BLACKLIST_QUERY:
		rc = ref_count;
		break;
	case BLACKLIST_ON:
		ref_count++;
		rc = 0;
		if (ref_count == 1) {
			rc = hif_napi_core_ctl_set_boost(true);
			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
				   rc, ref_count);
			hif_exec_bl_irq(napid, true);
		}
		break;
	case BLACKLIST_OFF:
		if (ref_count)
			ref_count--;
		rc = 0;
		if (ref_count == 0) {
			rc = hif_napi_core_ctl_set_boost(false);
			NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
				   rc, ref_count);
			hif_exec_bl_irq(napid, false);
		}
		break;
	default:
		NAPI_DEBUG("Invalid blacklist op: %d", op);
		rc = -EINVAL;
	} /* switch */
out:
	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}