Lines Matching +full:cpu +full:- +full:cfg
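These matches come from arch/arm/mach-imx/mmdc.c, the perf PMU driver for the i.MX6 MMDC DDR memory controller; each line carries its source line number and an "in ...()" suffix naming the enclosing function. The short annotated sketches interleaved below are editorial additions, not part of the match output.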

1 // SPDX-License-Identifier: GPL-2.0-or-later
76 { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
77 { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
86 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
87 PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
88 PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
89 PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
90 PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
91 PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
92 PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
93 PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
94 PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
95 PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
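The PMU_EVENT_ATTR_STRING() macros above define sysfs event attributes; the .unit/.scale companions tell user-space perf to multiply the raw byte counts by 0.000001 and report them in MB. A minimal sketch, with the group and array names assumed, of how such attributes are typically collected into the "events" attribute group handed to perf_pmu_register():

	static struct attribute *mmdc_pmu_events_attrs[] = {
		&mmdc_pmu_total_cycles.attr.attr,
		&mmdc_pmu_busy_cycles.attr.attr,
		&mmdc_pmu_read_accesses.attr.attr,
		&mmdc_pmu_write_accesses.attr.attr,
		&mmdc_pmu_read_bytes.attr.attr,
		&mmdc_pmu_read_bytes_unit.attr.attr,
		&mmdc_pmu_read_bytes_scale.attr.attr,
		&mmdc_pmu_write_bytes.attr.attr,
		&mmdc_pmu_write_bytes_unit.attr.attr,
		&mmdc_pmu_write_bytes_scale.attr.attr,
		NULL,
	};

	static const struct attribute_group mmdc_pmu_events_group = {
		.name = "events",
		.attrs = mmdc_pmu_events_attrs,
	};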
100 cpumask_t cpu;
112 * Polling period is set to one second, overflow of total-cycles (the fastest
130 return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu); in mmdc_pmu_cpumask_show()
164 PMU_FORMAT_ATTR(event, "config:0-63");
165 PMU_FORMAT_ATTR(axi_id, "config1:0-63");
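PMU_FORMAT_ATTR() publishes the layout of perf_event_attr.config under the PMU's sysfs format/ directory, which is what lets perf parse event=... and axi_id=... on the command line. A sketch of the matching format group, names assumed:

	static struct attribute *mmdc_pmu_format_attrs[] = {
		&format_attr_event.attr,
		&format_attr_axi_id.attr,
		NULL,
	};

	static const struct attribute_group mmdc_pmu_format_attrs_group = {
		.name = "format",
		.attrs = mmdc_pmu_format_attrs,
	};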
185 static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg) in mmdc_pmu_read_counter()
189 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_read_counter()
191 switch (cfg) { in mmdc_pmu_read_counter()
212 "invalid configuration %d for mmdc counter", cfg); in mmdc_pmu_read_counter()
217 static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) in mmdc_pmu_offline_cpu() argument
222 if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu)) in mmdc_pmu_offline_cpu()
225 target = cpumask_any_but(cpu_online_mask, cpu); in mmdc_pmu_offline_cpu()
229 perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target); in mmdc_pmu_offline_cpu()
230 cpumask_set_cpu(target, &pmu_mmdc->cpu); in mmdc_pmu_offline_cpu()
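The offline callback above migrates the perf context off a CPU that is going down and designates a surviving CPU as the new reader. It is registered once as a dynamic multi-instance hotplug state, and each PMU instance later adds its hlist node (visible in imx_mmdc_perf_init() below). A sketch under those assumptions (the state name string is assumed):

	static int cpuhp_mmdc_state;

	static int __init mmdc_hotplug_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online",
					      NULL, mmdc_pmu_offline_cpu);
		if (ret < 0)
			return ret;

		cpuhp_mmdc_state = ret;
		return 0;
	}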
239 int cfg = event->attr.config; in mmdc_pmu_group_event_is_valid()
244 if (event->pmu != pmu) in mmdc_pmu_group_event_is_valid()
247 return !test_and_set_bit(cfg, used_counters); in mmdc_pmu_group_event_is_valid()
251 * Each event has a single fixed-purpose counter, so we can only have a
258 struct pmu *pmu = event->pmu; in mmdc_pmu_group_is_valid()
259 struct perf_event *leader = event->group_leader; in mmdc_pmu_group_is_valid()
263 set_bit(leader->attr.config, &counter_mask); in mmdc_pmu_group_is_valid()
280 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_init()
281 int cfg = event->attr.config; in mmdc_pmu_event_init()
283 if (event->attr.type != event->pmu->type) in mmdc_pmu_event_init()
284 return -ENOENT; in mmdc_pmu_event_init()
286 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in mmdc_pmu_event_init()
287 return -EOPNOTSUPP; in mmdc_pmu_event_init()
289 if (event->cpu < 0) { in mmdc_pmu_event_init()
290 dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n"); in mmdc_pmu_event_init()
291 return -EOPNOTSUPP; in mmdc_pmu_event_init()
294 if (event->attr.sample_period) in mmdc_pmu_event_init()
295 return -EINVAL; in mmdc_pmu_event_init()
297 if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS) in mmdc_pmu_event_init()
298 return -EINVAL; in mmdc_pmu_event_init()
301 return -EINVAL; in mmdc_pmu_event_init()
303 event->cpu = cpumask_first(&pmu_mmdc->cpu); in mmdc_pmu_event_init()
309 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_update()
310 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_update()
314 prev_raw_count = local64_read(&hwc->prev_count); in mmdc_pmu_event_update()
316 event->attr.config); in mmdc_pmu_event_update()
317 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in mmdc_pmu_event_update()
320 delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF; in mmdc_pmu_event_update()
322 local64_add(delta, &event->count); in mmdc_pmu_event_update()
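The fragments above are the canonical lock-free perf update loop; assembled, with the mask handling wraparound of the 32-bit hardware counters:

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);

	/* counters are 32 bits wide, so take the delta modulo 2^32 */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
	local64_add(delta, &event->count);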
327 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_start()
328 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_start()
332 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_start()
339 hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(), in mmdc_pmu_event_start()
342 local64_set(&hwc->prev_count, 0); in mmdc_pmu_event_start()
349 val = event->attr.config1; in mmdc_pmu_event_start()
355 if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL) in mmdc_pmu_event_start()
363 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_add()
364 struct hw_perf_event *hwc = &event->hw; in mmdc_pmu_event_add()
366 int cfg = event->attr.config; in mmdc_pmu_event_add()
371 if (pmu_mmdc->mmdc_events[cfg] != NULL) in mmdc_pmu_event_add()
372 return -EAGAIN; in mmdc_pmu_event_add()
374 pmu_mmdc->mmdc_events[cfg] = event; in mmdc_pmu_event_add()
375 pmu_mmdc->active_events++; in mmdc_pmu_event_add()
377 local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg)); in mmdc_pmu_event_add()
384 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_stop()
387 mmdc_base = pmu_mmdc->mmdc_base; in mmdc_pmu_event_stop()
400 struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu); in mmdc_pmu_event_del()
401 int cfg = event->attr.config; in mmdc_pmu_event_del()
403 pmu_mmdc->mmdc_events[cfg] = NULL; in mmdc_pmu_event_del()
404 pmu_mmdc->active_events--; in mmdc_pmu_event_del()
406 if (pmu_mmdc->active_events == 0) in mmdc_pmu_event_del()
407 hrtimer_cancel(&pmu_mmdc->hrtimer); in mmdc_pmu_event_del()
417 struct perf_event *event = pmu_mmdc->mmdc_events[i]; in mmdc_pmu_overflow_handler()
456 pmu_mmdc->id = ida_alloc(&mmdc_ida, GFP_KERNEL); in mmdc_pmu_init()
458 return pmu_mmdc->id; in mmdc_pmu_init()
465 ida_free(&mmdc_ida, pmu_mmdc->id); in imx_mmdc_remove()
466 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_remove()
467 perf_pmu_unregister(&pmu_mmdc->pmu); in imx_mmdc_remove()
468 iounmap(pmu_mmdc->mmdc_base); in imx_mmdc_remove()
469 clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk); in imx_mmdc_remove()
483 return -ENOMEM; in imx_mmdc_perf_init()
498 ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); in imx_mmdc_perf_init()
502 name = devm_kasprintf(&pdev->dev, in imx_mmdc_perf_init()
505 ret = -ENOMEM; in imx_mmdc_perf_init()
509 pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; in imx_mmdc_perf_init()
510 pmu_mmdc->devtype_data = device_get_match_data(&pdev->dev); in imx_mmdc_perf_init()
512 hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, in imx_mmdc_perf_init()
514 pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler; in imx_mmdc_perf_init()
516 cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu); in imx_mmdc_perf_init()
518 /* Register the pmu instance for cpu hotplug */ in imx_mmdc_perf_init()
519 cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
521 ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1); in imx_mmdc_perf_init()
530 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); in imx_mmdc_perf_init()
531 hrtimer_cancel(&pmu_mmdc->hrtimer); in imx_mmdc_perf_init()
533 ida_free(&mmdc_ida, pmu_mmdc->id); in imx_mmdc_perf_init()
546 struct device_node *np = pdev->dev.of_node; in imx_mmdc_probe()
553 mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL); in imx_mmdc_probe()
559 dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n"); in imx_mmdc_probe()
595 .name = "imx-mmdc",