Lines Matching +full:odd +full:- +full:numbered
1 // SPDX-License-Identifier: GPL-2.0-only
11 * See Documentation/admin-guide/perf/qcom_l3_pmu.rst for more details.
13 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
132 * - config: bits 0-7: event type
138 return (event->attr.config) & L3_EVTYPE_MASK; in get_event_type()
143 return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT)); in event_uses_long_counter()
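
[Editor's note: a minimal userspace sketch of the decoding above. The mask value and the long-counter bit position are assumptions inferred from the "config:0-7" format attribute further down, not copied from the driver headers.]

    #include <stdint.h>
    #include <stdbool.h>

    /* Assumed values: event type in config bits 0-7, long-counter
     * flag assumed at config bit 32 (matches BIT_ULL() usage above). */
    #define L3_EVTYPE_MASK  0xFFULL
    #define L3_EVENT_LC_BIT 32

    static uint32_t decode_event_type(uint64_t config)
    {
        return (uint32_t)(config & L3_EVTYPE_MASK);
    }

    static bool decode_uses_long_counter(uint64_t config)
    {
        return !!(config & (1ULL << L3_EVENT_LC_BIT));
    }
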
186 * counters. The PMU only supports chaining of adjacent even/odd pairs
187 * and for simplicity the driver always configures the odd counter to
188 * count the overflows of the lower-numbered even counter. Note that since
195 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_start()
196 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_start()
200 /* Set the odd counter to count the overflows of the even counter */ in qcom_l3_cache__64bit_counter_start()
201 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_start()
203 writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_start()
206 local64_set(&event->hw.prev_count, 0); in qcom_l3_cache__64bit_counter_start()
207 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); in qcom_l3_cache__64bit_counter_start()
208 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__64bit_counter_start()
214 writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1)); in qcom_l3_cache__64bit_counter_start()
215 writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); in qcom_l3_cache__64bit_counter_start()
218 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1)); in qcom_l3_cache__64bit_counter_start()
219 writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__64bit_counter_start()
220 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); in qcom_l3_cache__64bit_counter_start()
221 writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__64bit_counter_start()
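
[Editor's note: an illustrative summary of the state these writes leave behind, not driver code; idx is even because the allocator hands out aligned pairs, see event_add below.]

    /*
     *   EVTYPE(idx)      = evsel   counter idx counts the requested event
     *   EVTYPE(idx + 1)  = 0       event input of the odd counter unused
     *   GANG_EN(idx + 1) = 1       counter idx+1 increments on overflows
     *                              of counter idx
     *
     * so {EVCNTR(idx + 1), EVCNTR(idx)} behaves as one 64-bit counter.
     */
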
227 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_stop()
228 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_stop()
229 u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_stop()
232 writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__64bit_counter_stop()
233 writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__64bit_counter_stop()
236 writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__64bit_counter_stop()
241 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__64bit_counter_update()
242 int idx = event->hw.idx; in qcom_l3_cache__64bit_counter_update()
247 prev = local64_read(&event->hw.prev_count); in qcom_l3_cache__64bit_counter_update()
249 hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); in qcom_l3_cache__64bit_counter_update()
250 lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__64bit_counter_update()
251 } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1))); in qcom_l3_cache__64bit_counter_update()
253 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in qcom_l3_cache__64bit_counter_update()
255 local64_add(new - prev, &event->count); in qcom_l3_cache__64bit_counter_update()
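
[Editor's note: the inner loop above is the classic lockless read of a 64-bit value split across two 32-bit registers: re-read the high half until it is stable, which proves the low half was sampled within a single high-half epoch. A self-contained sketch with a hypothetical accessor standing in for readl_relaxed():]

    #include <stdint.h>

    /* Stand-in for readl_relaxed(): a plain volatile load. */
    static uint32_t mmio_read32(const volatile uint32_t *addr)
    {
        return *addr;
    }

    static uint64_t read64_hi_lo_hi(const volatile uint32_t *hi_reg,
                                    const volatile uint32_t *lo_reg)
    {
        uint32_t hi, lo;

        do {
            hi = mmio_read32(hi_reg);
            lo = mmio_read32(lo_reg);
        } while (hi != mmio_read32(hi_reg));    /* retry if hi moved */

        return ((uint64_t)hi << 32) | lo;
    }
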
269 * the counter. This feature allows the counters to be left free-running
276 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_start()
277 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_start()
279 u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_start()
282 writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_start()
285 local64_set(&event->hw.prev_count, 0); in qcom_l3_cache__32bit_counter_start()
286 writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__32bit_counter_start()
289 writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); in qcom_l3_cache__32bit_counter_start()
292 writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET); in qcom_l3_cache__32bit_counter_start()
295 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); in qcom_l3_cache__32bit_counter_start()
296 writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); in qcom_l3_cache__32bit_counter_start()
302 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_stop()
303 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_stop()
304 u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_stop()
307 writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__32bit_counter_stop()
310 writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR); in qcom_l3_cache__32bit_counter_stop()
313 writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__32bit_counter_stop()
318 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__32bit_counter_update()
319 int idx = event->hw.idx; in qcom_l3_cache__32bit_counter_update()
323 prev = local64_read(&event->hw.prev_count); in qcom_l3_cache__32bit_counter_update()
324 new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); in qcom_l3_cache__32bit_counter_update()
325 } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); in qcom_l3_cache__32bit_counter_update()
327 local64_add(new - prev, &event->count); in qcom_l3_cache__32bit_counter_update()
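
[Editor's note: this update path pairs with the IRQ-on-MSB start sequence: the interrupt fires when bit 31 sets, so update() always runs before a second 32-bit wrap, and an unsigned 32-bit subtraction recovers the exact delta. A hedged userspace sketch of the same accumulation, using C11 atomics in place of local64_t:]

    #include <stdatomic.h>
    #include <stdint.h>

    struct sw_counter {
        _Atomic uint32_t prev;   /* last raw h/w reading folded in */
        _Atomic uint64_t count;  /* accumulated 64-bit total */
    };

    static void counter_update(struct sw_counter *c, uint32_t raw)
    {
        uint32_t prev = atomic_load(&c->prev);

        /* Claim [prev, raw); on failure prev is reloaded for us. */
        while (!atomic_compare_exchange_weak(&c->prev, &prev, raw))
            ;

        /* 32-bit unsigned subtraction is wrap-safe across one overflow. */
        atomic_fetch_add(&c->count, (uint32_t)(raw - prev));
    }
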
354 writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__init()
360 writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR); in qcom_l3_cache__init()
362 writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR); in qcom_l3_cache__init()
363 writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR); in qcom_l3_cache__init()
364 writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__init()
365 writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG); in qcom_l3_cache__init()
366 writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL); in qcom_l3_cache__init()
367 writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR); in qcom_l3_cache__init()
370 writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i)); in qcom_l3_cache__init()
371 writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i)); in qcom_l3_cache__init()
374 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA); in qcom_l3_cache__init()
375 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM); in qcom_l3_cache__init()
376 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB); in qcom_l3_cache__init()
377 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM); in qcom_l3_cache__init()
378 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC); in qcom_l3_cache__init()
379 writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM); in qcom_l3_cache__init()
385 writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__init()
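
[Editor's note: note the accessor mix in the init sequence above: the bulk of the programming uses writel_relaxed(), while the first write (line 360) and the final enable (line 385) use writel(), whose implied barrier publishes everything written so far before the unit starts. A minimal kernel-style sketch of the pattern, with hypothetical register offsets:]

    #include <linux/io.h>

    /* Hypothetical offsets, for illustration only. */
    #define DEV_CFG_A  0x08
    #define DEV_CFG_B  0x0c
    #define DEV_CTRL   0x00

    static void dev_program_and_enable(void __iomem *base)
    {
        writel_relaxed(0, base + DEV_CFG_A);  /* cheap, unordered */
        writel_relaxed(0, base + DEV_CFG_B);  /* cheap, unordered */
        writel(0x1, base + DEV_CTRL);         /* ordered: enable last */
    }
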
392 long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__handle_irq()
399 writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR); in qcom_l3_cache__handle_irq()
405 event = l3pmu->events[idx]; in qcom_l3_cache__handle_irq()
416 ops->update(event); in qcom_l3_cache__handle_irq()
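
[Editor's note: only scattered lines of the handler match the search. A hedged reconstruction of the surrounding loop (close to, but not claimed verbatim from, the source): snapshot and clear the overflow status, then fold each overflowed counter into its software count.]

        long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
        int idx;

        if (status == 0)
            return IRQ_NONE;

        /* Clear exactly the bits we observed. */
        writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);

        for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
            struct perf_event *event = l3pmu->events[idx];

            if (!event)
                continue;
            l3cache_event_get_ops(event)->update(event);
        }
        return IRQ_HANDLED;
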
434 writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__pmu_enable()
441 writel_relaxed(0, l3pmu->regs + L3_M_BC_CR); in qcom_l3_cache__pmu_disable()
453 struct perf_event *leader = event->group_leader; in qcom_l3_cache__validate_event_group()
457 if (leader->pmu != event->pmu && !is_software_event(leader)) in qcom_l3_cache__validate_event_group()
466 if (sibling->pmu != event->pmu) in qcom_l3_cache__validate_event_group()
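
[Editor's note: in the full source these two checks are followed by slot accounting: a long-counter event occupies an even/odd pair and so counts as two slots, and the group is valid only if the total over leader plus siblings fits in L3_NUM_COUNTERS. A hedged sketch of that rule (the driver's own helper is not shown in the matches):]

    /* Slots consumed by one event: an aligned pair for 64-bit mode. */
    static int sketch_num_counters(struct perf_event *event)
    {
        return event_uses_long_counter(event) ? 2 : 1;
    }
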
480 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_init()
481 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_init()
486 if (event->attr.type != event->pmu->type) in qcom_l3_cache__event_init()
487 return -ENOENT; in qcom_l3_cache__event_init()
490 * Sampling not supported since these events are not core-attributable. in qcom_l3_cache__event_init()
492 if (hwc->sample_period) in qcom_l3_cache__event_init()
493 return -EINVAL; in qcom_l3_cache__event_init()
497 * not attributable to any CPU and therefore cannot attribute per-task. in qcom_l3_cache__event_init()
499 if (event->cpu < 0) in qcom_l3_cache__event_init()
500 return -EINVAL; in qcom_l3_cache__event_init()
504 return -EINVAL; in qcom_l3_cache__event_init()
506 hwc->idx = -1; in qcom_l3_cache__event_init()
512 * but can lead to issues for off-core PMUs, like this one, where in qcom_l3_cache__event_init()
519 event->cpu = cpumask_first(&l3pmu->cpumask); in qcom_l3_cache__event_init()
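
[Editor's note: from userspace, the constraints enforced in event_init translate into: no sampling period, pid == -1 (system-wide counting), and a non-negative cpu. A hedged sketch of opening one of these events; the PMU type number comes from sysfs, and 0x21 is the read-miss event per the attribute table below.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>

    static int open_l3_event(int pmu_type, uint64_t config, int cpu)
    {
        struct perf_event_attr attr = {
            .type   = pmu_type,  /* .../l3cache_0_0/type in sysfs */
            .size   = sizeof(attr),
            .config = config,    /* e.g. 0x21 for read-miss */
        };

        /* pid == -1, cpu >= 0: counting mode, never per-task. */
        return syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
    }
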
526 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_start()
529 hwc->state = 0; in qcom_l3_cache__event_start()
530 ops->start(event); in qcom_l3_cache__event_start()
535 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_stop()
538 if (hwc->state & PERF_HES_STOPPED) in qcom_l3_cache__event_stop()
541 ops->stop(event, flags); in qcom_l3_cache__event_stop()
543 ops->update(event); in qcom_l3_cache__event_stop()
544 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in qcom_l3_cache__event_stop()
549 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_add()
550 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_add()
557 idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order); in qcom_l3_cache__event_add()
560 return -EAGAIN; in qcom_l3_cache__event_add()
562 hwc->idx = idx; in qcom_l3_cache__event_add()
563 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in qcom_l3_cache__event_add()
564 l3pmu->events[idx] = event; in qcom_l3_cache__event_add()
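
[Editor's note: the order argument is what guarantees the even/odd pairing the 64-bit path relies on: bitmap_find_free_region() allocates 2^order contiguous bits aligned to 2^order, so an order-1 request always returns an even idx with idx + 1 free. A sketch mirroring the driver's logic just above the matched lines:]

        /* 1 slot for a 32-bit event, an aligned pair for a 64-bit one. */
        int order = event_uses_long_counter(event) ? 1 : 0;
        int idx = bitmap_find_free_region(l3pmu->used_mask,
                                          L3_NUM_COUNTERS, order);
        if (idx < 0)
            return -EAGAIN;     /* all suitable counters busy */
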
577 struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); in qcom_l3_cache__event_del()
578 struct hw_perf_event *hwc = &event->hw; in qcom_l3_cache__event_del()
583 l3pmu->events[hwc->idx] = NULL; in qcom_l3_cache__event_del()
584 bitmap_release_region(l3pmu->used_mask, hwc->idx, order); in qcom_l3_cache__event_del()
594 ops->update(event); in qcom_l3_cache__event_read()
601 * - formats, used by perf user space and other tools to configure events
602 * - events, used by perf user space and other tools to create events
604 * perf stat -a -e l3cache_0_0/event=read-miss/ ls
605 * perf stat -a -e l3cache_0_0/event=0x21/ ls
606 * - cpumask, used by perf user space and other tools to know on which CPUs
619 L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
637 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); in l3cache_pmu_event_show()
645 L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
646 L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
647 L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
648 L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
649 L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
650 L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
666 return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask); in cpumask_show()
699 if (cpumask_empty(&l3pmu->cpumask)) in qcom_l3_cache_pmu_online_cpu()
700 cpumask_set_cpu(cpu, &l3pmu->cpumask); in qcom_l3_cache_pmu_online_cpu()
710 if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask)) in qcom_l3_cache_pmu_offline_cpu()
715 perf_pmu_migrate_context(&l3pmu->pmu, cpu, target); in qcom_l3_cache_pmu_offline_cpu()
716 cpumask_set_cpu(target, &l3pmu->cpumask); in qcom_l3_cache_pmu_offline_cpu()
730 acpi_dev = ACPI_COMPANION(&pdev->dev); in qcom_l3_cache_pmu_probe()
732 return -ENODEV; in qcom_l3_cache_pmu_probe()
734 l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL); in qcom_l3_cache_pmu_probe()
735 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s", in qcom_l3_cache_pmu_probe()
739 return -ENOMEM; in qcom_l3_cache_pmu_probe()
741 l3pmu->pmu = (struct pmu) { in qcom_l3_cache_pmu_probe()
742 .parent = &pdev->dev, in qcom_l3_cache_pmu_probe()
758 l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc); in qcom_l3_cache_pmu_probe()
759 if (IS_ERR(l3pmu->regs)) in qcom_l3_cache_pmu_probe()
760 return PTR_ERR(l3pmu->regs); in qcom_l3_cache_pmu_probe()
768 ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0, in qcom_l3_cache_pmu_probe()
771 dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n", in qcom_l3_cache_pmu_probe()
772 &memrc->start); in qcom_l3_cache_pmu_probe()
777 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node); in qcom_l3_cache_pmu_probe()
779 dev_err(&pdev->dev, "Error %d registering hotplug", ret); in qcom_l3_cache_pmu_probe()
783 ret = perf_pmu_register(&l3pmu->pmu, name, -1); in qcom_l3_cache_pmu_probe()
785 dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret); in qcom_l3_cache_pmu_probe()
789 dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type); in qcom_l3_cache_pmu_probe()
802 .name = "qcom-l3cache-pmu",