Lines matching "event" in the RISC-V perf driver

// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code.
 */

static bool riscv_perf_user_access(struct perf_event *event)
{
        return ((event->attr.type == PERF_TYPE_HARDWARE) ||
                (event->attr.type == PERF_TYPE_HW_CACHE) ||
                (event->attr.type == PERF_TYPE_RAW)) &&
                !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
                (event->hw.idx != -1);
}

void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 ns;

        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
        userpg->cap_user_time_short = 0;
        userpg->cap_user_rdpmc = riscv_perf_user_access(event);
        /*
         * The counters are 64-bit but the priv spec doesn't mandate all the
         * bits to be implemented: that's why, counter width can vary based on
         * the cpu vendor.
         */
        if (userpg->cap_user_rdpmc)
                userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;

        do {
                rd = sched_clock_read_begin(&seq);
                userpg->time_mult = rd->mult;
                userpg->time_shift = rd->shift;
                userpg->time_zero = rd->epoch_ns;
                userpg->time_cycles = rd->epoch_cyc;
                userpg->time_mask = rd->sched_clock_mask;
                ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
                userpg->time_zero -= ns;
        } while (sched_clock_read_retry(seq));

        userpg->time_offset = userpg->time_zero - now;
        /*
         * time_shift is not expected to be greater than 31 due to
         * the original published conversion algorithm shifting a
         * 32-bit value (now specifies a 64-bit value) - refer
         * perf_event_mmap_page documentation in perf_event.h.
         */
        if (userpg->time_shift == 32) {
                userpg->time_shift = 31;
                userpg->time_mult >>= 1;
        }

        userpg->cap_user_time = 1;
        userpg->cap_user_time_zero = 1;
        userpg->cap_user_time_short = 1;
}

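The time_mult, time_shift and time_zero fields exported above let a user-space reader turn a raw cycle count into nanoseconds without entering the kernel. Below is a minimal user-space sketch of that conversion; it is not kernel code, the sample values are invented, and it uses the GCC/Clang unsigned __int128 extension instead of the quot/rem split that the perf_event_mmap_page documentation describes.

#include <stdint.h>
#include <stdio.h>

/* ns = (cyc * mult) >> shift, using a 128-bit intermediate to avoid overflow. */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (uint64_t)(((unsigned __int128)cyc * mult) >> shift);
}

int main(void)
{
        /* Invented values: a 10 MHz counter, i.e. 100 ns per cycle. */
        uint32_t time_mult = 838860800;
        uint32_t time_shift = 23;
        uint64_t time_zero = 1000000000;        /* counter epoch, in ns */
        uint64_t cycles = 123456;

        uint64_t now_ns = time_zero + cyc_to_ns(cycles, time_mult, time_shift);

        printf("cycle %llu -> %llu ns\n",
               (unsigned long long)cycles, (unsigned long long)now_ns);
        return 0;
}
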
/* in riscv_pmu_ctr_read_csr(): */
                return -EINVAL;

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
        int cwidth;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx == -1)
                cwidth = rvpmu->ctr_get_width(0);
        else
                cwidth = rvpmu->ctr_get_width(hwc->idx);

        return GENMASK_ULL(cwidth, 0);
}

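ctr_get_width() reports the index of the counter's most significant implemented bit (the userpage code above adds 1 to obtain pmc_width), and this helper turns that into an all-ones mask covering the counter. A stand-alone illustration of the same mapping; this is not kernel code, width_mask() is a hypothetical equivalent of the kernel's GENMASK_ULL, and the 48- and 64-bit widths are just examples.

#include <stdint.h>
#include <stdio.h>

/* Equivalent of GENMASK_ULL(msb, 0): set bits msb..0. */
static uint64_t width_mask(unsigned int msb)
{
        return msb >= 63 ? ~0ULL : (1ULL << (msb + 1)) - 1;
}

int main(void)
{
        /* A 48-bit counter reports msb = 47, a full 64-bit one msb = 63. */
        printf("48-bit counter mask: 0x%016llx\n", (unsigned long long)width_mask(47));
        printf("64-bit counter mask: 0x%016llx\n", (unsigned long long)width_mask(63));
        return 0;
}
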
u64 riscv_pmu_event_update(struct perf_event *event)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count, oldval, delta, cmask;

        if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE))
                return 0;

        cmask = riscv_pmu_ctr_get_width_mask(event);
        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = rvpmu->ctr_read(event);
                oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                         new_raw_count);
        } while (oldval != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & cmask;
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
        return delta;
}

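Masking (new_raw_count - prev_raw_count) with the counter-width mask makes the delta correct even when a narrow hardware counter has wrapped between two reads, because the subtraction is effectively performed modulo the counter width. A small user-space demonstration of that arithmetic with hypothetical values and a 48-bit counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t cmask = (1ULL << 48) - 1;      /* 48-bit counter mask */
        uint64_t prev  = cmask - 10;            /* just below the wrap point */
        uint64_t curr  = 5;                     /* counter wrapped past zero */

        /* Same expression as in riscv_pmu_event_update(): modular subtraction. */
        uint64_t delta = (curr - prev) & cmask;

        printf("delta = %llu events\n", (unsigned long long)delta);     /* 16 */
        return 0;
}
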
void riscv_pmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (!(hwc->state & PERF_HES_STOPPED)) {
                if (rvpmu->ctr_stop) {
                        rvpmu->ctr_stop(event, 0);
                        hwc->state |= PERF_HES_STOPPED;
                }
                riscv_pmu_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

int riscv_pmu_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int overflow = 0;
        uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }
        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        local64_set(&hwc->prev_count, (u64)-left);
        perf_event_update_userpage(event);
        return overflow;
}

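Writing (u64)-left into prev_count is what arms the next sample: riscv_pmu_start() below masks that value to the counter width before programming the hardware, so the counter starts `left` counts below the wrap point and overflows after exactly `left` more events. A hypothetical illustration of the arithmetic for a 48-bit counter (user-space C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_period = (1ULL << 48) - 1; /* 48-bit counter mask */
        int64_t  left = 10000;                  /* events until the next sample */

        /* As in riscv_pmu_event_set_period() / riscv_pmu_start(): */
        uint64_t prev_count = (uint64_t)-left;
        uint64_t init_val = prev_count & max_period;

        printf("counter start value: 0x%llx\n", (unsigned long long)init_val);
        printf("events until wrap:   %llu\n",
               (unsigned long long)((max_period + 1) - init_val));      /* 10000 */
        return 0;
}
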
void riscv_pmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
        u64 init_val;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

        hwc->state = 0;
        riscv_pmu_event_set_period(event);
        init_val = local64_read(&hwc->prev_count) & max_period;
        rvpmu->ctr_start(event, init_val);
        perf_event_update_userpage(event);
}

static int riscv_pmu_add(struct perf_event *event, int flags)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        idx = rvpmu->ctr_get_idx(event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        cpuc->events[idx] = event;
        cpuc->n_events++;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                riscv_pmu_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);
        return idx;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;

        riscv_pmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[hwc->idx] = NULL;
        if (rvpmu->ctr_stop)
                rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
        cpuc->n_events--;
        if (rvpmu->ctr_clear_idx)
                rvpmu->ctr_clear_idx(event);
        perf_event_update_userpage(event);
        hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
        riscv_pmu_event_update(event);
}

static int riscv_pmu_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        int mapped_event;
        u64 event_config = 0;
        u64 cmask;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        hwc->flags = 0;
        mapped_event = rvpmu->event_map(event, &event_config);
        if (mapped_event < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapped_event;
        }

        /*
         * idx is set to -1 because the index of a general event should not be
         * decided until binding to some counter in pmu->add().
         */
        hwc->config = event_config;
        hwc->idx = -1;
        hwc->event_base = mapped_event;

        if (rvpmu->event_init)
                rvpmu->event_init(event);

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width.
                 */
                cmask = riscv_pmu_ctr_get_width_mask(event);
                hwc->sample_period = cmask >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        return 0;
}

static int riscv_pmu_event_idx(struct perf_event *event)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
                return 0;
        if (rvpmu->csr_index)
                return rvpmu->csr_index(event) + 1;
        return 0;
}

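The value returned here is what perf_event_update_userpage() publishes in the index field of the mmapped perf_event_mmap_page: non-zero means user space may read the chosen counter directly (index - 1 selects it), zero means it must fall back to read(). A hedged user-space sketch that opens a hardware event and inspects those fields; it uses only the generic perf_event_open ABI, is not specific to this driver, and leaves out the actual direct counter read (a csrr on the CSR the driver picked).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        struct perf_event_mmap_page *pc;
        long page_size = sysconf(_SC_PAGESIZE);
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_kernel = 1;        /* allow unprivileged use */

        fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* The first page of the perf mmap area is the read-only userpage. */
        pc = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /*
         * index != 0 means the counter selected by the driver can be read
         * directly (index - 1 identifies it); index == 0 means user space
         * must use read(fd, ...) instead.
         */
        printf("cap_user_rdpmc = %u, index = %u, pmc_width = %u\n",
               (unsigned)pc->cap_user_rdpmc, (unsigned)pc->index,
               (unsigned)pc->pmc_width);

        munmap(pc, page_size);
        close(fd);
        return 0;
}
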
static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (rvpmu->event_mapped) {
                rvpmu->event_mapped(event, mm);
                perf_event_update_userpage(event);
        }
}

static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (rvpmu->event_unmapped) {
                rvpmu->event_unmapped(event, mm);
                perf_event_update_userpage(event);
        }
}

/* riscv_pmu_alloc() */
        pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        for_each_possible_cpu(cpuid) {
                cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
                cpuc->n_events = 0;
                for (i = 0; i < RISCV_MAX_COUNTERS; i++)
                        cpuc->events[i] = NULL;
                cpuc->snapshot_addr = NULL;
        }

        pmu->pmu = (struct pmu) {