Lines Matching +full:dont +full:- +full:validate

5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
12 * For licensing details see kernel-base/COPYING
119 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
120 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
124 if (unlikely(!hwc->event_base)) in x86_perf_event_update()
131 * exchange a new raw count - then add that new-prev delta in x86_perf_event_update()
134 prev_raw_count = local64_read(&hwc->prev_count); in x86_perf_event_update()
136 rdpmcl(hwc->event_base_rdpmc, new_raw_count); in x86_perf_event_update()
137 } while (!local64_try_cmpxchg(&hwc->prev_count, in x86_perf_event_update()
143 * (event-)time and add that to the generic event. in x86_perf_event_update()
145 * Careful, not all hw sign-extends above the physical width in x86_perf_event_update()
148 delta = (new_raw_count << shift) - (prev_raw_count << shift); in x86_perf_event_update()
151 local64_add(delta, &event->count); in x86_perf_event_update()
152 local64_sub(delta, &hwc->period_left); in x86_perf_event_update()
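
The shift arithmetic above is the whole trick: the raw counter is only cntval_bits wide, so both samples are shifted into the top of a 64-bit word and the signed difference is shifted back down, which keeps the delta correct across a counter wraparound. A minimal userspace sketch of the same arithmetic, assuming a hypothetical 48-bit counter (the real width comes from CPUID leaf 0xA):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter width; the real value comes from CPUID leaf 0xA. */
#define CNTVAL_BITS	48

/* Width-extend two raw samples and return the signed delta, mirroring
 * the shift trick in x86_perf_event_update() above. */
static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	int shift = 64 - CNTVAL_BITS;
	int64_t delta;

	/* Shift both samples into the top of the 64-bit word so that the
	 * arithmetic shift back down sign-extends across a wraparound. */
	delta = (int64_t)(new_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;
	return delta;
}

int main(void)
{
	/* The counter wrapped from near the top of its 48-bit range to a
	 * small value; the delta is still the short positive distance. */
	uint64_t prev = 0xfffffffffff0ULL;	/* near the 48-bit maximum */
	uint64_t new  = 0x10ULL;		/* just after the wrap     */

	printf("delta = %lld\n", (long long)counter_delta(prev, new)); /* 32 */
	return 0;
}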
158 * Find and validate any extra registers to set up.
162 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); in x86_pmu_extra_regs()
166 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
171 for (er = extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
172 if (er->event != (config & er->config_mask)) in x86_pmu_extra_regs()
174 if (event->attr.config1 & ~er->valid_mask) in x86_pmu_extra_regs()
175 return -EINVAL; in x86_pmu_extra_regs()
177 if (!er->extra_msr_access) in x86_pmu_extra_regs()
178 return -ENXIO; in x86_pmu_extra_regs()
180 reg->idx = er->idx; in x86_pmu_extra_regs()
181 reg->config = event->attr.config1; in x86_pmu_extra_regs()
182 reg->reg = er->msr; in x86_pmu_extra_regs()
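
The loop above walks a table of extra-register descriptors terminated by msr == 0, matching the event bits against config_mask and rejecting any config1 bits outside valid_mask. A standalone sketch of the same table walk; the struct name, the table contents, and the MSR/event numbers here are illustrative stand-ins, not the kernel's real per-CPU tables:

#include <errno.h>
#include <stdint.h>

/* Hypothetical offcore-response-style descriptor; the field names mirror
 * the struct extra_reg walk above, the values are made up for illustration. */
struct extra_reg_desc {
	uint64_t event;		/* event code this MSR applies to  */
	uint64_t config_mask;	/* bits of config that must match  */
	uint64_t valid_mask;	/* bits of config1 that may be set */
	uint32_t msr;		/* 0 terminates the table          */
};

static const struct extra_reg_desc extra_regs[] = {
	{ .event = 0x01b7, .config_mask = 0xffff,
	  .valid_mask = 0x3fffffffffULL, .msr = 0x1a6 },
	{ .msr = 0 },		/* sentinel */
};

/* Return 0 if config/config1 name a usable extra register, or a
 * negative errno in the style of x86_pmu_extra_regs(). */
static int validate_extra_reg(uint64_t config, uint64_t config1)
{
	const struct extra_reg_desc *er;

	for (er = extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (config1 & ~er->valid_mask)
			return -EINVAL;	/* reserved bits set */
		return 0;
	}
	return 0;		/* no extra register needed */
}

int main(void)
{
	/* 0x01b7 with in-range config1 bits validates cleanly. */
	return validate_extra_reg(0x01b7, 0x1) ? 1 : 0;
}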
260 u64 val, val_fail = -1, val_new = ~0; in check_hw_exists()
261 int i, reg, reg_fail = -1, ret = 0; in check_hw_exists()
263 int reg_safe = -1; in check_hw_exists()
305 if (reg_safe == -1) { in check_hw_exists()
329 pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", in check_hw_exists()
369 struct perf_event_attr *attr = &event->attr; in set_ext_hw_attr()
373 config = attr->config; in set_ext_hw_attr()
377 return -EINVAL; in set_ext_hw_attr()
382 return -EINVAL; in set_ext_hw_attr()
387 return -EINVAL; in set_ext_hw_attr()
390 val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
392 return -ENOENT; in set_ext_hw_attr()
394 if (val == -1) in set_ext_hw_attr()
395 return -EINVAL; in set_ext_hw_attr()
397 hwc->config |= val; in set_ext_hw_attr()
398 attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
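
set_ext_hw_attr() gets its three table indices from the PERF_TYPE_HW_CACHE encoding of attr.config, which packs the cache id, operation, and result into the low three bytes; that layout is part of the perf ABI. A small userspace sketch of the decoding:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a PERF_TYPE_HW_CACHE config value into its three table
 * indices, as set_ext_hw_attr() does before the table lookup. */
static int decode_hw_cache_config(uint64_t config, unsigned int *type,
				  unsigned int *op, unsigned int *result)
{
	*type   =  config        & 0xff;
	*op     = (config >>  8) & 0xff;
	*result = (config >> 16) & 0xff;

	if (*type >= PERF_COUNT_HW_CACHE_MAX ||
	    *op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    *result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int type, op, result;
	uint64_t config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	if (!decode_hw_cache_config(config, &type, &op, &result))
		printf("type=%u op=%u result=%u\n", type, op, result);
	return 0;
}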
410 err = -EBUSY; in x86_reserve_hardware()
465 return -EBUSY; in x86_add_exclusive()
483 struct perf_event_attr *attr = &event->attr; in x86_setup_perfctr()
484 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
488 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
489 hwc->last_period = hwc->sample_period; in x86_setup_perfctr()
490 local64_set(&hwc->period_left, hwc->sample_period); in x86_setup_perfctr()
493 if (attr->type == event->pmu->type) in x86_setup_perfctr()
494 return x86_pmu_extra_regs(event->attr.config, event); in x86_setup_perfctr()
496 if (attr->type == PERF_TYPE_HW_CACHE) in x86_setup_perfctr()
499 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
500 return -EINVAL; in x86_setup_perfctr()
502 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
507 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
510 return -ENOENT; in x86_setup_perfctr()
512 if (config == -1LL) in x86_setup_perfctr()
513 return -EINVAL; in x86_setup_perfctr()
515 hwc->config |= config; in x86_setup_perfctr()
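
From userspace, the path above is what a plain PERF_TYPE_HARDWARE event goes through: attr.config is bounds-checked and fed to x86_pmu.event_map(). A minimal self-profiling sketch using the perf_event_open() syscall (glibc ships no wrapper, so the raw syscall is used); the busy loop is only there to have something to count:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* glibc provides no wrapper for perf_event_open(). */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* mapped via x86_pmu.event_map() */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;		/* clears ARCH_PERFMON_EVENTSEL_OS */

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}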
528 u64 m = event->attr.branch_sample_type; in precise_br_compat()
537 if (!event->attr.exclude_user) in precise_br_compat()
540 if (!event->attr.exclude_kernel) in precise_br_compat()
570 if (event->attr.precise_ip) { in x86_pmu_hw_config()
573 if (event->attr.precise_ip > precise) in x86_pmu_hw_config()
574 return -EOPNOTSUPP; in x86_pmu_hw_config()
578 return -EINVAL; in x86_pmu_hw_config()
582 * whatever the user is asking with attr->branch_sample_type in x86_pmu_hw_config()
584 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
585 u64 *br_type = &event->attr.branch_sample_type; in x86_pmu_hw_config()
589 return -EOPNOTSUPP; in x86_pmu_hw_config()
603 if (!event->attr.exclude_user) in x86_pmu_hw_config()
606 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
612 event->attach_state |= PERF_ATTACH_TASK_DATA; in x86_pmu_hw_config()
618 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
623 if (!event->attr.exclude_user) in x86_pmu_hw_config()
624 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
625 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
626 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
628 if (event->attr.type == event->pmu->type) in x86_pmu_hw_config()
629 event->hw.config |= x86_pmu_get_event_config(event); in x86_pmu_hw_config()
631 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
632 s64 left = event->attr.sample_period; in x86_pmu_hw_config()
634 if (left > event->attr.sample_period) in x86_pmu_hw_config()
635 return -EINVAL; in x86_pmu_hw_config()
639 if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) in x86_pmu_hw_config()
640 return -EINVAL; in x86_pmu_hw_config()
645 if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { in x86_pmu_hw_config()
646 if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) in x86_pmu_hw_config()
647 return -EINVAL; in x86_pmu_hw_config()
649 if (!event->attr.precise_ip) in x86_pmu_hw_config()
650 return -EINVAL; in x86_pmu_hw_config()
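
The exclude_user/exclude_kernel handling above ends up as the USR/OS bits of the hardware PERFEVTSELx value, next to the always-set overflow-interrupt bit. A sketch composing such a value from the architectural bit positions in the Intel SDM; the macro names are shortened for this sketch, and the enable bit is included here for completeness even though the kernel only ORs it in when the event is actually started:

#include <stdint.h>
#include <stdio.h>

/* Architectural PERFEVTSELx bit positions (Intel SDM, "Architectural
 * Performance Monitoring"); names shortened for this sketch. */
#define EVTSEL_EVENT(e)	((uint64_t)(e) & 0xff)
#define EVTSEL_UMASK(u)	(((uint64_t)(u) & 0xff) << 8)
#define EVTSEL_USR	(1ULL << 16)	/* count in user mode    */
#define EVTSEL_OS	(1ULL << 17)	/* count in kernel mode  */
#define EVTSEL_INT	(1ULL << 20)	/* interrupt on overflow */
#define EVTSEL_EN	(1ULL << 22)	/* enable the counter    */

/* Mirror the exclude_user/exclude_kernel handling in x86_pmu_hw_config(). */
static uint64_t build_evtsel(unsigned int event, unsigned int umask,
			     int exclude_user, int exclude_kernel)
{
	uint64_t val = EVTSEL_EVENT(event) | EVTSEL_UMASK(umask) | EVTSEL_INT;

	if (!exclude_user)
		val |= EVTSEL_USR;
	if (!exclude_kernel)
		val |= EVTSEL_OS;
	/* The kernel sets the enable bit later, when the event starts. */
	return val | EVTSEL_EN;
}

int main(void)
{
	/* 0x3c/0x00 is the architectural "unhalted core cycles" encoding. */
	printf("evtsel = %#llx\n",
	       (unsigned long long)build_evtsel(0x3c, 0x00, 0, 1));
	return 0;
}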
664 return -ENODEV; in __x86_pmu_event_init()
671 event->destroy = hw_perf_event_destroy; in __x86_pmu_event_init()
673 event->hw.idx = -1; in __x86_pmu_event_init()
674 event->hw.last_cpu = -1; in __x86_pmu_event_init()
675 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
678 event->hw.extra_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
679 event->hw.branch_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
690 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_disable_all()
693 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
716 * It will not be re-enabled in the NMI handler again, because enabled=0. After
731 if (!cpuc->enabled) in x86_pmu_disable()
734 cpuc->n_added = 0; in x86_pmu_disable()
735 cpuc->enabled = 0; in x86_pmu_disable()
747 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
749 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
761 return event->pmu == &pmu; in is_x86_event()
764 if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) in is_x86_event()
779 if (WARN_ON_ONCE(!cpuc->pmu)) in x86_get_pmu()
782 return cpuc->pmu; in x86_get_pmu()
822 sched->max_events = num; in perf_sched_init()
823 sched->max_weight = wmax; in perf_sched_init()
824 sched->max_gp = gpmax; in perf_sched_init()
825 sched->constraints = constraints; in perf_sched_init()
828 if (constraints[idx]->weight == wmin) in perf_sched_init()
832 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
833 sched->state.weight = wmin; in perf_sched_init()
834 sched->state.unassigned = num; in perf_sched_init()
839 if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) in perf_sched_save_state()
842 sched->saved[sched->saved_states] = sched->state; in perf_sched_save_state()
843 sched->saved_states++; in perf_sched_save_state()
848 if (!sched->saved_states) in perf_sched_restore_state()
851 sched->saved_states--; in perf_sched_restore_state()
852 sched->state = sched->saved[sched->saved_states]; in perf_sched_restore_state()
856 sched->state.used &= ~BIT_ULL(sched->state.counter); in perf_sched_restore_state()
859 sched->state.counter++; in perf_sched_restore_state()
873 if (!sched->state.unassigned) in __perf_sched_find_counter()
876 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
879 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
881 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { in __perf_sched_find_counter()
883 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { in __perf_sched_find_counter()
886 if (sched->state.used & mask) in __perf_sched_find_counter()
889 sched->state.used |= mask; in __perf_sched_find_counter()
895 idx = sched->state.counter; in __perf_sched_find_counter()
896 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { in __perf_sched_find_counter()
899 if (c->flags & PERF_X86_EVENT_PAIR) in __perf_sched_find_counter()
902 if (sched->state.used & mask) in __perf_sched_find_counter()
905 if (sched->state.nr_gp++ >= sched->max_gp) in __perf_sched_find_counter()
908 sched->state.used |= mask; in __perf_sched_find_counter()
915 sched->state.counter = idx; in __perf_sched_find_counter()
917 if (c->overlap) in __perf_sched_find_counter()
941 if (!sched->state.unassigned || !--sched->state.unassigned) in perf_sched_next_event()
946 sched->state.event++; in perf_sched_next_event()
947 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
949 sched->state.event = 0; in perf_sched_next_event()
950 sched->state.weight++; in perf_sched_next_event()
951 if (sched->state.weight > sched->max_weight) in perf_sched_next_event()
954 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
955 } while (c->weight != sched->state.weight); in perf_sched_next_event()
957 sched->state.counter = 0; /* start with first counter */ in perf_sched_next_event()
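
The scheduler above places events in order of increasing constraint weight (the number of counters an event may use), tracks claimed counters in a bitmask, and backtracks through the saved-state stack when an overlapping constraint forces a retry. A deliberately simplified greedy sketch of the weight-ordered idea, with none of the backtracking, fixed-counter, or pair handling:

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS	4

/* One entry per event: bitmask of counters the event may use. */
struct constraint {
	uint64_t idxmsk;
	int weight;		/* popcount of idxmsk */
};

/* Greedy weight-ordered assignment in the spirit of perf_assign_events():
 * schedule the most constrained events first so they are not starved by
 * events that could have gone anywhere.  Returns the number of events
 * that could not be placed.  (The real scheduler also backtracks.) */
static int assign_events(struct constraint *c, int n, int *assign)
{
	uint64_t used = 0;
	int unsched = 0;

	for (int w = 1; w <= NUM_COUNTERS; w++) {
		for (int i = 0; i < n; i++) {
			if (c[i].weight != w)
				continue;
			int idx;
			for (idx = 0; idx < NUM_COUNTERS; idx++) {
				uint64_t bit = 1ULL << idx;
				if ((c[i].idxmsk & bit) && !(used & bit)) {
					used |= bit;
					assign[i] = idx;
					break;
				}
			}
			if (idx == NUM_COUNTERS) {
				assign[i] = -1;
				unsched++;
			}
		}
	}
	return unsched;
}

int main(void)
{
	/* Event 0 may only use counter 2, event 1 may use any counter. */
	struct constraint c[2] = {
		{ .idxmsk = 1ULL << 2, .weight = 1 },
		{ .idxmsk = 0xf,       .weight = 4 },
	};
	int assign[2];

	int unsched = assign_events(c, 2, assign);
	printf("unsched=%d assign[0]=%d assign[1]=%d\n",
	       unsched, assign[0], assign[1]);
	return 0;
}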
994 * cpuc->n_events hasn't been updated yet, while for the latter in x86_schedule_events()
995 * cpuc->n_txn contains the number of events added in the current in x86_schedule_events()
998 n0 = cpuc->n_events; in x86_schedule_events()
999 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_schedule_events()
1000 n0 -= cpuc->n_txn; in x86_schedule_events()
1005 c = cpuc->event_constraint[i]; in x86_schedule_events()
1015 * have a dynamic constraint -- for those the constraint can in x86_schedule_events()
1018 if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { in x86_schedule_events()
1019 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
1020 cpuc->event_constraint[i] = c; in x86_schedule_events()
1023 wmin = min(wmin, c->weight); in x86_schedule_events()
1024 wmax = max(wmax, c->weight); in x86_schedule_events()
1033 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
1034 c = cpuc->event_constraint[i]; in x86_schedule_events()
1037 if (hwc->idx == -1) in x86_schedule_events()
1041 if (!test_bit(hwc->idx, c->idxmsk)) in x86_schedule_events()
1044 mask = BIT_ULL(hwc->idx); in x86_schedule_events()
1055 assign[i] = hwc->idx; in x86_schedule_events()
1060 int gpmax = x86_pmu_max_num_counters(cpuc->pmu); in x86_schedule_events()
1072 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
1073 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
1081 gpmax -= cpuc->n_pair; in x86_schedule_events()
1085 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
1097 * validate an event group (assign == NULL) in x86_schedule_events()
1104 e = cpuc->event_list[i]; in x86_schedule_events()
1111 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
1117 return unsched ? -EINVAL : 0; in x86_schedule_events()
1124 if (cpuc->n_metric == INTEL_TD_METRIC_NUM) in add_nr_metric_event()
1125 return -EINVAL; in add_nr_metric_event()
1126 cpuc->n_metric++; in add_nr_metric_event()
1127 cpuc->n_txn_metric++; in add_nr_metric_event()
1137 cpuc->n_metric--; in del_nr_metric_event()
1143 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in collect_event()
1146 return -EINVAL; in collect_event()
1148 if (n >= max_count + cpuc->n_metric) in collect_event()
1149 return -EINVAL; in collect_event()
1151 cpuc->event_list[n] = event; in collect_event()
1152 if (is_counter_pair(&event->hw)) { in collect_event()
1153 cpuc->n_pair++; in collect_event()
1154 cpuc->n_txn_pair++; in collect_event()
1169 max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu); in collect_events()
1172 n = cpuc->n_events; in collect_events()
1173 if (!cpuc->n_events) in collect_events()
1174 cpuc->pebs_output = 0; in collect_events()
1176 if (!cpuc->is_fake && leader->attr.precise_ip) { in collect_events()
1178 * For PEBS->PT, if !aux_event, the group leader (PT) went in collect_events()
1182 if (is_pebs_pt(leader) && !leader->aux_event) in collect_events()
1183 return -EINVAL; in collect_events()
1188 if (cpuc->pebs_output && in collect_events()
1189 cpuc->pebs_output != is_pebs_pt(leader) + 1) in collect_events()
1190 return -EINVAL; in collect_events()
1192 cpuc->pebs_output = is_pebs_pt(leader) + 1; in collect_events()
1197 return -EINVAL; in collect_events()
1205 if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) in collect_events()
1209 return -EINVAL; in collect_events()
1219 struct hw_perf_event *hwc = &event->hw; in x86_assign_hw_event()
1222 idx = hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
1223 hwc->last_cpu = smp_processor_id(); in x86_assign_hw_event()
1224 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
1228 switch (hwc->idx) { in x86_assign_hw_event()
1231 hwc->config_base = 0; in x86_assign_hw_event()
1232 hwc->event_base = 0; in x86_assign_hw_event()
1239 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1: in x86_assign_hw_event()
1240 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; in x86_assign_hw_event()
1241 hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED); in x86_assign_hw_event()
1242 hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | in x86_assign_hw_event()
1247 hwc->config_base = x86_pmu_config_addr(hwc->idx); in x86_assign_hw_event()
1248 hwc->event_base = x86_pmu_event_addr(hwc->idx); in x86_assign_hw_event()
1249 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); in x86_assign_hw_event()
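
hwc->event_base_rdpmc is the value user code will later place in ECX for the RDPMC instruction: general-purpose counters use their plain index, while fixed counters are selected by setting bit 30, which is what INTEL_PMC_FIXED_RDPMC_BASE contributes in the fixed-counter branch above. A small sketch of just that selector encoding (an actual RDPMC read appears in the userpage example further down):

#include <stdint.h>
#include <stdio.h>

/* ECX bit 30 selects the fixed-function counter file for RDPMC
 * (Intel SDM); this is the role INTEL_PMC_FIXED_RDPMC_BASE plays in
 * x86_assign_hw_event() above. */
#define RDPMC_FIXED_BASE	(1u << 30)

/* Compute the RDPMC selector (the eventual hwc->event_base_rdpmc)
 * for a general-purpose or fixed counter index. */
static uint32_t rdpmc_selector(unsigned int idx, int is_fixed)
{
	return is_fixed ? (RDPMC_FIXED_BASE | idx) : idx;
}

int main(void)
{
	printf("GP counter 2    -> rdpmc ecx = %#x\n", rdpmc_selector(2, 0));
	printf("fixed counter 1 -> rdpmc ecx = %#x\n", rdpmc_selector(1, 1));
	return 0;
}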
1255 * x86_perf_rdpmc_index - Return PMC counter used for event
1272 return event->hw.event_base_rdpmc; in x86_perf_rdpmc_index()
1279 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
1280 hwc->last_cpu == smp_processor_id() && in match_prev_assignment()
1281 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
1291 int i, added = cpuc->n_added; in x86_pmu_enable()
1296 if (cpuc->enabled) in x86_pmu_enable()
1299 if (cpuc->n_added) { in x86_pmu_enable()
1300 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1308 event = cpuc->event_list[i]; in x86_pmu_enable()
1309 hwc = &event->hw; in x86_pmu_enable()
1313 * - assigned same counter as last time in x86_pmu_enable()
1314 * - running on same CPU as last time in x86_pmu_enable()
1315 * - no other event has used the counter since in x86_pmu_enable()
1317 if (hwc->idx == -1 || in x86_pmu_enable()
1325 if (hwc->state & PERF_HES_STOPPED) in x86_pmu_enable()
1326 hwc->state |= PERF_HES_ARCH; in x86_pmu_enable()
1334 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1335 event = cpuc->event_list[i]; in x86_pmu_enable()
1336 hwc = &event->hw; in x86_pmu_enable()
1343 if (hwc->state & PERF_HES_ARCH) in x86_pmu_enable()
1347 * if cpuc->enabled = 0, then no wrmsr as in x86_pmu_enable()
1352 cpuc->n_added = 0; in x86_pmu_enable()
1356 cpuc->enabled = 1; in x86_pmu_enable()
1365 * Set the next IRQ period, based on the hwc->period_left value.
1370 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_set_period()
1371 s64 left = local64_read(&hwc->period_left); in x86_perf_event_set_period()
1372 s64 period = hwc->sample_period; in x86_perf_event_set_period()
1373 int ret = 0, idx = hwc->idx; in x86_perf_event_set_period()
1375 if (unlikely(!hwc->event_base)) in x86_perf_event_set_period()
1381 if (unlikely(left <= -period)) { in x86_perf_event_set_period()
1383 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1384 hwc->last_period = period; in x86_perf_event_set_period()
1390 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1391 hwc->last_period = period; in x86_perf_event_set_period()
1395 * Quirk: certain CPUs dont like it if just 1 hw_event is left: in x86_perf_event_set_period()
1411 local64_set(&hwc->prev_count, (u64)-left); in x86_perf_event_set_period()
1413 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1417 * we currently declare a 48-bit counter width in x86_perf_event_set_period()
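
x86_perf_event_set_period() programs the counter with the two's complement of the remaining period, masked to the counter width, so the hardware overflows (and raises the PMI) after exactly that many increments. A minimal arithmetic sketch assuming a 48-bit counter:

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS	48
#define CNTVAL_MASK	((1ULL << CNTVAL_BITS) - 1)

/* Value to write into the counter so it overflows after 'left' events,
 * mirroring the wrmsrl(hwc->event_base, (u64)(-left) & cntval_mask)
 * line in x86_perf_event_set_period() above. */
static uint64_t program_value(int64_t left)
{
	return (uint64_t)(-left) & CNTVAL_MASK;
}

int main(void)
{
	int64_t left = 100000;			/* sample period remaining */
	uint64_t start = program_value(left);

	/* After 'left' increments the counter wraps past the top of its
	 * 48-bit range, which is what triggers the overflow interrupt. */
	uint64_t after = (start + (uint64_t)left) & CNTVAL_MASK;

	printf("start=%#llx events-to-overflow=%llu wraps-to=%#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(CNTVAL_MASK - start + 1),
	       (unsigned long long)after);
	return 0;
}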
1430 __x86_pmu_enable_event(&event->hw, in x86_pmu_enable_event()
1447 hwc = &event->hw; in x86_pmu_add()
1449 n0 = cpuc->n_events; in x86_pmu_add()
1454 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in x86_pmu_add()
1456 hwc->state |= PERF_HES_ARCH; in x86_pmu_add()
1461 * at commit time (->commit_txn) as a whole. in x86_pmu_add()
1463 * If commit fails, we'll call ->del() on all events in x86_pmu_add()
1464 * for which ->add() was called. in x86_pmu_add()
1466 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_add()
1476 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1483 cpuc->n_events = n; in x86_pmu_add()
1484 cpuc->n_added += n - n0; in x86_pmu_add()
1485 cpuc->n_txn += n - n0; in x86_pmu_add()
1501 int idx = event->hw.idx; in x86_pmu_start()
1503 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in x86_pmu_start()
1506 if (WARN_ON_ONCE(idx == -1)) in x86_pmu_start()
1510 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in x86_pmu_start()
1514 event->hw.state = 0; in x86_pmu_start()
1516 cpuc->events[idx] = event; in x86_pmu_start()
1517 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1535 cntr_mask = hybrid(cpuc->pmu, cntr_mask); in perf_event_print_debug()
1536 fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); in perf_event_print_debug()
1537 pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); in perf_event_print_debug()
1562 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1570 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", in perf_event_print_debug()
1572 pr_info("CPU#%d: gen-PMC%d count: %016llx\n", in perf_event_print_debug()
1574 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", in perf_event_print_debug()
1578 if (fixed_counter_disabled(idx, cpuc->pmu)) in perf_event_print_debug()
1582 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", in perf_event_print_debug()
1590 struct hw_perf_event *hwc = &event->hw; in x86_pmu_stop()
1592 if (test_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1594 __clear_bit(hwc->idx, cpuc->active_mask); in x86_pmu_stop()
1595 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
1596 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in x86_pmu_stop()
1597 hwc->state |= PERF_HES_STOPPED; in x86_pmu_stop()
1600 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { in x86_pmu_stop()
1606 hwc->state |= PERF_HES_UPTODATE; in x86_pmu_stop()
1613 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in x86_pmu_del()
1618 * The events never got scheduled and ->cancel_txn will truncate in x86_pmu_del()
1621 * XXX assumes any ->del() called during a TXN will only be on in x86_pmu_del()
1624 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_del()
1627 __set_bit(event->hw.idx, cpuc->dirty); in x86_pmu_del()
1634 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1635 if (event == cpuc->event_list[i]) in x86_pmu_del()
1639 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1643 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1644 --cpuc->n_added; in x86_pmu_del()
1649 while (++i < cpuc->n_events) { in x86_pmu_del()
1650 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1651 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1652 cpuc->assign[i-1] = cpuc->assign[i]; in x86_pmu_del()
1654 cpuc->event_constraint[i-1] = NULL; in x86_pmu_del()
1655 --cpuc->n_events; in x86_pmu_del()
1691 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_handle_irq()
1694 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1697 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
1708 perf_sample_data_init(&data, 0, event->hw.last_period); in x86_pmu_handle_irq()
1711 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); in x86_pmu_handle_irq()
1752 perf_sample_event_took(finish_clock - start_clock); in perf_event_nmi_handler()
1767 cpuc->kfree_on_online[i] = NULL; in x86_pmu_prepare_cpu()
1786 kfree(cpuc->kfree_on_online[i]); in x86_pmu_online_cpu()
1787 cpuc->kfree_on_online[i] = NULL; in x86_pmu_online_cpu()
1812 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); in pmu_check_apic()
1818 * events (user-space has to fall back and in pmu_check_apic()
1836 if (pmu_attr->id < x86_pmu.max_events) in events_sysfs_show()
1837 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1840 if (pmu_attr->event_str) in events_sysfs_show()
1841 return sprintf(page, "%s\n", pmu_attr->event_str); in events_sysfs_show()
1854 * Report conditional events depending on Hyper-Threading. in events_ht_sysfs_show()
1862 * has to re-read when a thread sibling gets onlined later. in events_ht_sysfs_show()
1866 pmu_attr->event_str_ht : in events_ht_sysfs_show()
1867 pmu_attr->event_str_noht); in events_ht_sysfs_show()
1880 if (hweight64(pmu_attr->pmu_type) == 1) in events_hybrid_sysfs_show()
1881 return sprintf(page, "%s", pmu_attr->event_str); in events_hybrid_sysfs_show()
1885 * event encoding, e.g., the mem-loads event on an Atom PMU has in events_hybrid_sysfs_show()
1894 str = pmu_attr->event_str; in events_hybrid_sysfs_show()
1896 if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) in events_hybrid_sysfs_show()
1898 if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { in events_hybrid_sysfs_show()
1901 return snprintf(page, next_str - str + 1, "%s", str); in events_hybrid_sysfs_show()
1913 EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1915 EVENT_ATTR(cache-references, CACHE_REFERENCES );
1916 EVENT_ATTR(cache-misses, CACHE_MISSES );
1917 EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1918 EVENT_ATTR(branch-misses, BRANCH_MISSES );
1919 EVENT_ATTR(bus-cycles, BUS_CYCLES );
1920 EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1921 EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1922 EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1954 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
2053 pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu)); in x86_pmu_show_pmu_cap()
2080 err = -ENOTSUPP; in init_hw_perf_events()
2098 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
2099 quirk->func(); in init_hw_perf_events()
2169 hybrid_pmu->pmu = pmu; in init_hw_perf_events()
2170 hybrid_pmu->pmu.type = -1; in init_hw_perf_events()
2171 hybrid_pmu->pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2172 hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; in init_hw_perf_events()
2174 err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, in init_hw_perf_events()
2175 (hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1); in init_hw_perf_events()
2216 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
2223 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */ in x86_pmu_start_txn()
2225 cpuc->txn_flags = txn_flags; in x86_pmu_start_txn()
2245 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_cancel_txn()
2247 txn_flags = cpuc->txn_flags; in x86_pmu_cancel_txn()
2248 cpuc->txn_flags = 0; in x86_pmu_cancel_txn()
2276 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_commit_txn()
2278 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { in x86_pmu_commit_txn()
2279 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2283 n = cpuc->n_events; in x86_pmu_commit_txn()
2286 return -EAGAIN; in x86_pmu_commit_txn()
2296 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
2298 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
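
The transaction hooks let perf core add a whole group speculatively: ->add() calls between start_txn() and commit_txn() only become permanent if scheduling succeeds, otherwise cancel_txn() rolls the counts back by n_txn. A hypothetical mini-PMU sketch of just that bookkeeping; the names and structure here are invented for illustration:

#include <errno.h>
#include <stdio.h>

#define TXN_ADD		0x1	/* stands in for PERF_PMU_TXN_ADD */
#define MAX_EVENTS	8

/* Hypothetical mini-PMU state, just enough to show the n_txn
 * bookkeeping done by x86_pmu_{start,commit,cancel}_txn(). */
struct mini_pmu {
	int txn_flags;
	int n_events;		/* committed events               */
	int n_txn;		/* events added since start_txn() */
};

static void start_txn(struct mini_pmu *p, int flags)
{
	p->txn_flags = flags;
	if (flags & TXN_ADD)
		p->n_txn = 0;
}

static int add_event(struct mini_pmu *p)
{
	if (p->n_events >= MAX_EVENTS)
		return -EINVAL;
	p->n_events++;
	if (p->txn_flags & TXN_ADD)
		p->n_txn++;
	return 0;
}

static void cancel_txn(struct mini_pmu *p)
{
	/* Roll back everything added during the transaction. */
	if (p->txn_flags & TXN_ADD)
		p->n_events -= p->n_txn;
	p->txn_flags = 0;
}

static int commit_txn(struct mini_pmu *p)
{
	/* The real code re-runs x86_schedule_events() over all n_events
	 * here and only clears txn_flags if that succeeds. */
	p->txn_flags = 0;
	return 0;
}

int main(void)
{
	struct mini_pmu p = { 0 };

	start_txn(&p, TXN_ADD);
	add_event(&p);
	add_event(&p);
	cancel_txn(&p);		/* group did not fit: back to 0 events */
	printf("after cancel: n_events=%d\n", p.n_events);

	start_txn(&p, TXN_ADD);
	add_event(&p);
	if (!commit_txn(&p))
		printf("after commit: n_events=%d\n", p.n_events);
	return 0;
}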
2303 * a fake_cpuc is used to validate event groups. Due to
2323 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2324 cpuc->is_fake = 1; in allocate_fake_cpuc()
2330 if (cpumask_empty(&h_pmu->supported_cpus)) in allocate_fake_cpuc()
2332 cpu = cpumask_first(&h_pmu->supported_cpus); in allocate_fake_cpuc()
2335 cpuc->pmu = event_pmu; in allocate_fake_cpuc()
2343 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2347 * validate that we can schedule this event
2355 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_event()
2361 if (!c || !c->weight) in validate_event()
2362 ret = -EINVAL; in validate_event()
2373 * validate a single event group
2376 * - check events are compatible which each other
2377 * - events do not compete for the same counter
2378 * - number of events <= number of counters
2385 struct perf_event *leader = event->group_leader; in validate_group()
2387 int ret = -EINVAL, n; in validate_group()
2397 pmu = leader->pmu; in validate_group()
2403 pmu = sibling->pmu; in validate_group()
2404 else if (pmu != sibling->pmu) in validate_group()
2409 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_group()
2422 fake_cpuc->n_events = n; in validate_group()
2427 fake_cpuc->n_events = 0; in validate_group()
2440 if ((event->attr.type != event->pmu->type) && in x86_pmu_event_init()
2441 (event->attr.type != PERF_TYPE_HARDWARE) && in x86_pmu_event_init()
2442 (event->attr.type != PERF_TYPE_HW_CACHE)) in x86_pmu_event_init()
2443 return -ENOENT; in x86_pmu_event_init()
2445 if (is_hybrid() && (event->cpu != -1)) { in x86_pmu_event_init()
2446 pmu = hybrid_pmu(event->pmu); in x86_pmu_event_init()
2447 if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus)) in x86_pmu_event_init()
2448 return -ENOENT; in x86_pmu_event_init()
2453 if (event->group_leader != event) in x86_pmu_event_init()
2459 if (event->destroy) in x86_pmu_event_init()
2460 event->destroy(event); in x86_pmu_event_init()
2461 event->destroy = NULL; in x86_pmu_event_init()
2465 !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) in x86_pmu_event_init()
2466 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in x86_pmu_event_init()
2477 for (i = 0; i < cpuc->n_events; i++) in perf_clear_dirty_counters()
2478 __clear_bit(cpuc->assign[i], cpuc->dirty); in perf_clear_dirty_counters()
2480 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX)) in perf_clear_dirty_counters()
2483 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { in perf_clear_dirty_counters()
2486 if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) in perf_clear_dirty_counters()
2489 wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); in perf_clear_dirty_counters()
2495 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); in perf_clear_dirty_counters()
2500 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_mapped()
2515 if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) in x86_pmu_event_mapped()
2521 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_unmapped()
2524 if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) in x86_pmu_event_unmapped()
2530 struct hw_perf_event *hwc = &event->hw; in x86_pmu_event_idx()
2532 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_idx()
2535 if (is_metric_idx(hwc->idx)) in x86_pmu_event_idx()
2538 return hwc->event_base_rdpmc + 1; in x86_pmu_event_idx()
2561 return -EINVAL; in set_attr_rdpmc()
2564 return -ENOTSUPP; in set_attr_rdpmc()
2571 * aka perf-event-bypassing mode. This path is extremely slow, in set_attr_rdpmc()
2649 return -EINVAL; in x86_pmu_check_period()
2655 return -EINVAL; in x86_pmu_check_period()
2718 userpg->cap_user_time = 0; in arch_perf_update_userpage()
2719 userpg->cap_user_time_zero = 0; in arch_perf_update_userpage()
2720 userpg->cap_user_rdpmc = in arch_perf_update_userpage()
2721 !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT); in arch_perf_update_userpage()
2722 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2735 userpg->cap_user_time = 1; in arch_perf_update_userpage()
2736 userpg->time_mult = data.cyc2ns_mul; in arch_perf_update_userpage()
2737 userpg->time_shift = data.cyc2ns_shift; in arch_perf_update_userpage()
2738 userpg->time_offset = offset - now; in arch_perf_update_userpage()
2744 if (!event->attr.use_clockid) { in arch_perf_update_userpage()
2745 userpg->cap_user_time_zero = 1; in arch_perf_update_userpage()
2746 userpg->time_zero = offset; in arch_perf_update_userpage()
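
The fields published here (cap_user_rdpmc, pmc_width, the time_* conversion values) are what make lockless self-monitoring reads possible from userspace. A sketch of the reader side, following the seqlock protocol documented in the perf_event_mmap_page comments: retry while pg->lock changes, RDPMC with index - 1, sign-extend to pmc_width bits, then add pg->offset. Mapping the event's control page is what enables RDPMC for the process (see x86_pmu_event_mapped() above):

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Self-monitoring read using the seqlock protocol documented for
 * struct perf_event_mmap_page. */
static uint64_t read_self(volatile struct perf_event_mmap_page *pg)
{
	uint64_t count;
	int64_t offset;
	uint32_t seq, idx, width;

	do {
		seq = pg->lock;
		__sync_synchronize();
		idx = pg->index;
		offset = pg->offset;
		width = pg->pmc_width;
		count = 0;
		if (pg->cap_user_rdpmc && idx) {
			uint32_t lo, hi;
			asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx - 1));
			count = lo | ((uint64_t)hi << 32);
			/* Sign-extend the raw pmc_width-bit value. */
			count <<= 64 - width;
			count = (uint64_t)((int64_t)count >> (64 - width));
		}
		count += (uint64_t)offset;
		__sync_synchronize();
	} while (pg->lock != seq);

	return count;
}

int main(void)
{
	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	int fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Mapping only the control page is enough for rdpmc reads. */
	struct perf_event_mmap_page *pg =
		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pg == MAP_FAILED)
		return 1;

	printf("instructions so far: %llu\n",
	       (unsigned long long)read_self(pg));
	munmap(pg, sysconf(_SC_PAGESIZE));
	close(fd);
	return 0;
}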
2758 return regs->flags & X86_EFLAGS_FIXED; in perf_hw_regs()
2772 if (perf_callchain_store(entry, regs->ip)) in perf_callchain_kernel()
2778 unwind_start(&state, current, NULL, (void *)regs->sp); in perf_callchain_kernel()
2803 ldt = READ_ONCE(current->active_mm->context.ldt); in get_segment_base()
2804 if (!ldt || idx >= ldt->nr_entries) in get_segment_base()
2807 desc = &ldt->entries[idx]; in get_segment_base()
2823 * Heuristic-based check if uprobe is installed at the function entry.
2828 * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
2836 if (!current->utask) in is_uprobe_at_func_entry()
2839 auprobe = current->utask->auprobe; in is_uprobe_at_func_entry()
2844 if (auprobe->insn[0] == 0x55) in is_uprobe_at_func_entry()
2847 /* endbr64 (64-bit only) */ in is_uprobe_at_func_entry()
2848 if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn)) in is_uprobe_at_func_entry()
2868 /* 32-bit process in 64-bit kernel. */ in perf_callchain_user32()
2877 cs_base = get_segment_base(regs->cs); in perf_callchain_user32()
2878 ss_base = get_segment_base(regs->ss); in perf_callchain_user32()
2880 fp = compat_ptr(ss_base + regs->bp); in perf_callchain_user32()
2885 !get_user(ret_addr, (const u32 __user *)regs->sp)) in perf_callchain_user32()
2888 while (entry->nr < entry->max_stack) { in perf_callchain_user32()
2892 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user32()
2894 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user32()
2926 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) in perf_callchain_user()
2929 fp = (void __user *)regs->bp; in perf_callchain_user()
2931 perf_callchain_store(entry, regs->ip); in perf_callchain_user()
2945 * we should read return address from *regs->sp before proceeding in perf_callchain_user()
2950 !get_user(ret_addr, (const unsigned long __user *)regs->sp)) in perf_callchain_user()
2953 while (entry->nr < entry->max_stack) { in perf_callchain_user()
2957 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user()
2959 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user()
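
perf_callchain_user() walks the user stack by treating each frame as a {next_frame, return_address} pair reached through the saved frame pointer. A userspace sketch doing the same walk on its own stack; it assumes the binary keeps frame pointers (-O0 or -fno-omit-frame-pointer) and stops at main()'s frame rather than trusting whatever libc left in RBP:

#include <stdint.h>
#include <stdio.h>

/* Saved x86-64 frame layout when frame pointers are kept:
 * [rbp] = caller's rbp, [rbp+8] = return address -- the same pair the
 * kernel copies with __get_user() in perf_callchain_user(). */
struct stack_frame {
	struct stack_frame *next_frame;
	uint64_t return_address;
};

static void *top_frame;		/* main()'s frame, where the walk stops */

static void __attribute__((noinline)) dump_callchain(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && depth < 16) {
		printf("#%d  ret=%#llx\n", depth,
		       (unsigned long long)fp->return_address);
		if ((void *)fp >= top_frame)	/* reached main(): stop */
			break;
		/* Stacks grow down, so each older frame sits higher. */
		if (fp->next_frame <= fp)
			break;
		fp = fp->next_frame;
		depth++;
	}
}

static void __attribute__((noinline)) level2(void) { dump_callchain(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	top_frame = __builtin_frame_address(0);
	level1();
	return 0;
}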
2971 * VM86 - the good olde 16 bit days, where the linear address is
2972 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2974 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2977 * X32 - has TIF_X32 set, but is running in x86_64
2979 * X86_64 - CS,DS,SS,ES are all zero based.
2993 if (regs->flags & X86_VM_MASK) in code_segment_base()
2994 return 0x10 * regs->cs; in code_segment_base()
2996 if (user_mode(regs) && regs->cs != __USER_CS) in code_segment_base()
2997 return get_segment_base(regs->cs); in code_segment_base()
3000 regs->cs != __USER32_CS) in code_segment_base()
3001 return get_segment_base(regs->cs); in code_segment_base()
3011 return regs->ip + code_segment_base(regs); in perf_instruction_pointer()
3031 if (regs->flags & PERF_EFLAGS_EXACT) in perf_misc_flags()
3048 * all E-cores are disabled via BIOS. When E-cores are disabled, the in perf_get_x86_pmu_capability()
3049 * base PMU holds the correct number of counters for P-cores. in perf_get_x86_pmu_capability()
3051 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
3052 cap->num_counters_gp = x86_pmu_num_counters(NULL); in perf_get_x86_pmu_capability()
3053 cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL); in perf_get_x86_pmu_capability()
3054 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
3055 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
3056 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
3057 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
3058 cap->pebs_ept = x86_pmu.pebs_ept; in perf_get_x86_pmu_capability()
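
On Intel hardware, the version, counter counts, and bit widths reported here originate from CPUID leaf 0xA. A userspace sketch reading the same fields directly, using the layout documented in the SDM (EAX[7:0] version, EAX[15:8]/EAX[23:16] general-purpose counter count and width, EDX[4:0]/EDX[12:5] fixed counter count and width):

#include <cpuid.h>
#include <stdio.h>

/* Read the architectural PMU capabilities from CPUID leaf 0xA; the
 * field layout follows the Intel SDM and corresponds to what the
 * kernel exports through perf_get_x86_pmu_capability(). */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0xA not supported\n");
		return 1;
	}

	unsigned int version	 =  eax        & 0xff;
	unsigned int num_gp	 = (eax >>  8) & 0xff;
	unsigned int width_gp	 = (eax >> 16) & 0xff;
	unsigned int num_fixed	 =  edx        & 0x1f;
	unsigned int width_fixed = (edx >>  5) & 0xff;

	printf("PMU version    : %u\n", version);
	printf("GP counters    : %u x %u bits\n", num_gp, width_gp);
	printf("fixed counters : %u x %u bits\n", num_fixed, width_fixed);
	return 0;
}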