Lines Matching +full:ddr +full:- +full:pmu

// SPDX-License-Identifier: GPL-2.0-only
/* Author: Lin Huang <hl@rock-chips.com> */
#include <linux/devfreq-event.h>
/*
 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * @clock_cycles: DDR clock cycles
 */

/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * ...
 * generated to indicate the DDR frequency should be changed.
 */

/* in struct rockchip_dfi: */
        struct pmu pmu;
/* in rockchip_dfi_enable(): */
        void __iomem *dfi_regs = dfi->regs;

        mutex_lock(&dfi->mutex);

        dfi->usecount++;
        if (dfi->usecount > 1)

        ret = clk_prepare_enable(dfi->clk);
                dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))

                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                /* set ddr type to dfi */
                switch (dfi->ddr_type) {
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                if (dfi->ddrmon_ctrl_single)

        mutex_unlock(&dfi->mutex);
/* in rockchip_dfi_disable(): */
        void __iomem *dfi_regs = dfi->regs;

        mutex_lock(&dfi->mutex);

        dfi->usecount--;

        WARN_ON_ONCE(dfi->usecount < 0);

        if (dfi->usecount > 0)

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))

                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                if (dfi->ddrmon_ctrl_single)

        clk_disable_unprepare(dfi->clk);

        mutex_unlock(&dfi->mutex);
/* in rockchip_dfi_read_counters(): */
        void __iomem *dfi_regs = dfi->regs;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                res->c[i].read_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
                res->c[i].write_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
                res->c[i].access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
                res->c[i].clock_cycles = readl_relaxed(dfi_regs +
                                DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
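/*
 * Editor's sketch (not part of the matched lines): the four reads above fill
 * per-channel counters, which implies a layout roughly like the one below.
 * The struct names, the u64 field width and the four-entry array are
 * assumptions made for illustration only.
 */
struct dmc_count_channel_sketch {
        u64 access;             /* DDRMON_CH0_DFI_ACCESS_NUM */
        u64 clock_cycles;       /* DDRMON_CH0_COUNT_NUM */
        u64 read_access;        /* DDRMON_CH0_RD_NUM */
        u64 write_access;       /* DDRMON_CH0_WR_NUM */
};

struct dmc_count_sketch {
        struct dmc_count_channel_sketch c[4];   /* one entry per DDR channel */
};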
/* in rockchip_dfi_get_event(): */
        struct dmc_count *last = &dfi->last_event_count;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))

                a = count.c[i].access - last->c[i].access;
                c = count.c[i].clock_cycles - last->c[i].clock_cycles;

        edata->load_count = access * 4;
        edata->total_count = clock_cycles;

        dfi->last_event_count = count;
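/*
 * Editor's sketch (not part of the matched lines): rockchip_dfi_get_event()
 * reports load_count (DFI accesses * 4) and total_count (DDR clock cycles)
 * through the devfreq-event API; a consumer could derive a utilization
 * percentage like this. The helper name is hypothetical.
 */
static unsigned int dfi_load_percent_sketch(const struct devfreq_event_data *edata)
{
        if (!edata->total_count)
                return 0;

        /* needs <linux/math64.h>; avoids 32-bit overflow of load_count * 100 */
        return div64_u64((u64)edata->load_count * 100, edata->total_count);
}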
/* in rockchip_ddr_perf_counters_add(): */
        const struct dmc_count *last = &dfi->last_perf_count;

        for (i = 0; i < dfi->max_channels; i++) {
                res->c[i].read_access = dfi->total_count.c[i].read_access +
                        (u32)(now->c[i].read_access - last->c[i].read_access);
                res->c[i].write_access = dfi->total_count.c[i].write_access +
                        (u32)(now->c[i].write_access - last->c[i].write_access);
                res->c[i].access = dfi->total_count.c[i].access +
                        (u32)(now->c[i].access - last->c[i].access);
                res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
                        (u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
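/*
 * Editor's note: the (u32) casts above keep the deltas correct even when a
 * 32-bit hardware counter wraps between two samples, because unsigned
 * subtraction is modulo 2^32. Minimal standalone (userspace) check:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t last = 0xfffffff0u;    /* 32-bit hw counter just before the wrap */
        uint64_t now  = 0x00000010u;    /* the same counter after it wrapped */

        /* truncating the difference to 32 bits recovers the true distance */
        assert((uint32_t)(now - last) == 0x20);
        return 0;
}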
/* in ddr_perf_cpumask_show(): */
        struct pmu *pmu = dev_get_drvdata(dev);
        struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
/* in the DFI_PMU_EVENT_ATTR() macro definition: */
        PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")

DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));

PMU_FORMAT_ATTR(event, "config:0-7");
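/*
 * Editor's note: the ".scale" string above is 1/2^20:
 *
 *         1 / 1048576 = 9.5367431640625e-07  (rounded: 9.536743164e-07)
 *
 * so perf tooling scales the raw byte counts down to MiB. That MiB is the
 * intended unit is an inference from this value, not something the matched
 * lines state explicitly.
 */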
/* in rockchip_ddr_perf_event_init(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->cpu < 0) {
                dev_warn(dfi->dev, "Can't provide per-task data!\n");
                return -EINVAL;
/* in rockchip_ddr_perf_event_get_count(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
        int blen = dfi->burst_len;

                seq = read_seqbegin(&dfi->count_seqlock);
        } while (read_seqretry(&dfi->count_seqlock, seq));

        switch (event->attr.config) {
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].read_access * blen * dfi->buswidth[i];
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].write_access * blen * dfi->buswidth[i];
                count = total.c[0].read_access * blen * dfi->buswidth[0];
                count = total.c[0].write_access * blen * dfi->buswidth[0];
                count = total.c[1].read_access * blen * dfi->buswidth[1];
                count = total.c[1].write_access * blen * dfi->buswidth[1];
                count = total.c[2].read_access * blen * dfi->buswidth[2];
                count = total.c[2].write_access * blen * dfi->buswidth[2];
                count = total.c[3].read_access * blen * dfi->buswidth[3];
                count = total.c[3].write_access * blen * dfi->buswidth[3];
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].access * blen * dfi->buswidth[i];
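/*
 * Editor's sketch (not part of the matched lines): every branch above computes
 * bytes as accesses * burst_len * buswidth. Assuming buswidth is in bytes
 * (4 for a 32-bit channel, 2 for a 16-bit one, as set in the *_dfi_init()
 * helpers below), a worked example:
 */
static u64 ddr_bytes_sketch(u64 accesses, unsigned int burst_len, unsigned int buswidth)
{
        /* e.g. 1000000 accesses * burst_len 16 * buswidth 4 = 64000000 bytes */
        return accesses * burst_len * buswidth;
}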
/* in rockchip_ddr_perf_event_update(): */
        if (event->attr.config >= PERF_ACCESS_TYPE_MAX)

        prev = local64_xchg(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);

/* in rockchip_ddr_perf_event_start(): */
        local64_set(&event->hw.prev_count, now);
/* in rockchip_ddr_perf_event_add(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        dfi->active_events++;

        if (dfi->active_events == 1) {
                dfi->total_count = (struct dmc_count){};
                rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
                hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);

/* in rockchip_ddr_perf_event_del(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        dfi->active_events--;

        if (dfi->active_events == 0)
                hrtimer_cancel(&dfi->timer);
/* in rockchip_dfi_timer(): */
        write_seqlock(&dfi->count_seqlock);

        dfi->total_count = total;
        dfi->last_perf_count = now;

        write_sequnlock(&dfi->count_seqlock);

        hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));
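/*
 * Editor's note (assumptions: 32-bit hardware counters and, as an example,
 * a 1.6 GHz DDR clock; neither figure appears in the matched lines): a
 * counter can wrap after roughly 2^32 / 1.6e9 ~= 2.7 seconds, so folding the
 * hardware counters into the 64-bit software totals once per second
 * (NSEC_PER_SEC above) is frequent enough not to miss a wrap.
 */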
/* in ddr_perf_offline_cpu(): */
        if (cpu != dfi->cpu)

        perf_pmu_migrate_context(&dfi->pmu, cpu, target);
        dfi->cpu = target;

/* in rockchip_ddr_cpuhp_remove_state(): */
        cpuhp_remove_multi_state(dfi->cpuhp_state);

/* in rockchip_ddr_cpuhp_remove_instance(): */
        cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);

/* in rockchip_ddr_perf_remove(): */
        perf_pmu_unregister(&dfi->pmu);
/* in rockchip_ddr_perf_init(): */
        struct pmu *pmu = &dfi->pmu;

        seqlock_init(&dfi->count_seqlock);

        pmu->module = THIS_MODULE;
        pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
        pmu->task_ctx_nr = perf_invalid_context;
        pmu->attr_groups = attr_groups;
        pmu->event_init = rockchip_ddr_perf_event_init;
        pmu->add = rockchip_ddr_perf_event_add;
        pmu->del = rockchip_ddr_perf_event_del;
        pmu->start = rockchip_ddr_perf_event_start;
        pmu->stop = rockchip_ddr_perf_event_stop;
        pmu->read = rockchip_ddr_perf_event_update;

        dfi->cpu = raw_smp_processor_id();

                dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);

        dfi->cpuhp_state = ret;

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);

        ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
                dev_err(dfi->dev, "Error %d registering hotplug\n", ret);

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);

        hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        dfi->timer.function = rockchip_dfi_timer;

        switch (dfi->ddr_type) {
                dfi->burst_len = 8;
                dfi->burst_len = 16;

        ret = perf_pmu_register(pmu, "rockchip_ddr", -1);

        return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
/* in rk3399_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
        if (IS_ERR(dfi->clk))
                return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),

        /* get ddr type */
        dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

        dfi->channel_mask = GENMASK(1, 0);
        dfi->max_channels = 2;

        dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x14;
        dfi->ddrmon_ctrl_single = true;
/* in rk3568_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /* For version three and higher the upper two bits of the DDR type are ... */
                dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->channel_mask = BIT(0);
        dfi->max_channels = 1;

        dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x0;       /* not relevant, we only have a single channel on this SoC */
        dfi->ddrmon_ctrl_single = true;
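/*
 * Editor's sketch (hypothetical helper, not part of the matched lines): how
 * the 5-bit DDR type is assembled on RK3568/RK3588 - the low three bits come
 * from PMUGRF_OS_REG2 and, for system register version three and higher, two
 * more bits from PMUGRF_OS_REG3 are shifted in above them.
 */
static unsigned int ddr_type_sketch(unsigned int reg2_type_low3,
                                    unsigned int reg3_type_high2)
{
        return (reg2_type_low3 & 0x7) | ((reg3_type_high2 & 0x3) << 3);
}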
/* in rk3588_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /* For version three and higher the upper two bits of the DDR type are ... */
                dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
        dfi->buswidth[2] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
        dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
        dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |

        dfi->max_channels = 4;

        dfi->ddrmon_stride = 0x4000;
/* of_device_id match table: */
        { .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
        { .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
        { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
/* in rockchip_dfi_probe(): */
        struct device *dev = &pdev->dev;
        struct device_node *np = pdev->dev.of_node, *node;

        soc_init = of_device_get_match_data(&pdev->dev);
                return -EINVAL;

                return -ENOMEM;

        dfi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dfi->regs))
                return PTR_ERR(dfi->regs);

        node = of_parse_phandle(np, "rockchip,pmu", 0);
                return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");

        dfi->regmap_pmu = syscon_node_to_regmap(node);

        if (IS_ERR(dfi->regmap_pmu))
                return PTR_ERR(dfi->regmap_pmu);

        dfi->dev = dev;
        mutex_init(&dfi->mutex);

        desc = &dfi->desc;
        desc->ops = &rockchip_dfi_ops;
        desc->driver_data = dfi;
        desc->name = np->name;

        dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
        if (IS_ERR(dfi->edev)) {
                dev_err(&pdev->dev,
                        "failed to add devfreq-event device\n");
                return PTR_ERR(dfi->edev);
/* driver name and module metadata: */
                .name = "rockchip-dfi",

MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");