Lines Matching +full:guest +full:- +full:index +full:- +full:bits

1 // SPDX-License-Identifier: GPL-2.0-only
24 * Perf's "BASE" is wildly misleading: architectural PMUs use bits 31:16 of ECX
26 * further confuse things, non-architectural PMUs use bit 31 as a flag for
35 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
40 u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; in reprogram_fixed_counters()
43 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
44 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
53 __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use); in reprogram_fixed_counters()
69 * non-architectural PMUs (PMUs with version '0'). For architectural in intel_rdpmc_ecx_to_pmc()
70 * PMUs, bits 31:16 specify the PMC type and bits 15:0 specify the PMC in intel_rdpmc_ecx_to_pmc()
71 * index. For non-architectural PMUs, bit 31 is a "fast" flag, and in intel_rdpmc_ecx_to_pmc()
72 * bits 30:0 specify the PMC index. in intel_rdpmc_ecx_to_pmc()
74 * Yell and reject attempts to read PMCs for a non-architectural PMU, in intel_rdpmc_ecx_to_pmc()
77 if (WARN_ON_ONCE(!pmu->version)) in intel_rdpmc_ecx_to_pmc()
85 * accessing a non-existent counter. Reject attempts to read all other in intel_rdpmc_ecx_to_pmc()
90 counters = pmu->fixed_counters; in intel_rdpmc_ecx_to_pmc()
91 num_counters = pmu->nr_arch_fixed_counters; in intel_rdpmc_ecx_to_pmc()
92 bitmask = pmu->counter_bitmask[KVM_PMC_FIXED]; in intel_rdpmc_ecx_to_pmc()
95 counters = pmu->gp_counters; in intel_rdpmc_ecx_to_pmc()
96 num_counters = pmu->nr_arch_gp_counters; in intel_rdpmc_ecx_to_pmc()
97 bitmask = pmu->counter_bitmask[KVM_PMC_GP]; in intel_rdpmc_ecx_to_pmc()
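A minimal decode sketch of the architectural RDPMC encoding described in the comment above; the mask names and the fixed-counter type value are assumptions based on the SDM layout, not necessarily the kernel's own macros.

/* Sketch only: ECX[31:16] selects the counter type, ECX[15:0] the index. */
#define RDPMC_TYPE_MASK		0xffff0000u
#define RDPMC_INDEX_MASK	0x0000ffffu
#define RDPMC_TYPE_FIXED	0x40000000u	/* bit 30 set => fixed-function counter */

static inline bool rdpmc_is_fixed(u32 ecx)
{
	return (ecx & RDPMC_TYPE_MASK) == RDPMC_TYPE_FIXED;
}

static inline u32 rdpmc_index(u32 ecx)
{
	return ecx & RDPMC_INDEX_MASK;
}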
116 return vcpu->arch.perf_capabilities; in vcpu_get_perf_capabilities()
132 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index) in intel_pmu_is_valid_lbr_msr() argument
140 ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) || in intel_pmu_is_valid_lbr_msr()
141 (index >= records->from && index < records->from + records->nr) || in intel_pmu_is_valid_lbr_msr()
142 (index >= records->to && index < records->to + records->nr); in intel_pmu_is_valid_lbr_msr()
144 if (!ret && records->info) in intel_pmu_is_valid_lbr_msr()
145 ret = (index >= records->info && index < records->info + records->nr); in intel_pmu_is_valid_lbr_msr()
197 if (lbr_desc->event) { in intel_pmu_release_guest_lbr_event()
198 perf_event_release_kernel(lbr_desc->event); in intel_pmu_release_guest_lbr_event()
199 lbr_desc->event = NULL; in intel_pmu_release_guest_lbr_event()
200 vcpu_to_pmu(vcpu)->event_count--; in intel_pmu_release_guest_lbr_event()
212 * - set 'pinned = true' to make it task pinned so that if another in intel_pmu_create_guest_lbr_event()
213 * cpu pinned event reclaims LBR, the event->oncpu will be set to -1; in intel_pmu_create_guest_lbr_event()
214 * - set '.exclude_host = true' to record guest branch behavior; in intel_pmu_create_guest_lbr_event()
216 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate host perf in intel_pmu_create_guest_lbr_event()
220 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and in intel_pmu_create_guest_lbr_event()
223 * event, which helps KVM to save/restore guest LBR records in intel_pmu_create_guest_lbr_event()
238 if (unlikely(lbr_desc->event)) { in intel_pmu_create_guest_lbr_event()
239 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
243 event = perf_event_create_kernel_counter(&attr, -1, in intel_pmu_create_guest_lbr_event()
250 lbr_desc->event = event; in intel_pmu_create_guest_lbr_event()
251 pmu->event_count++; in intel_pmu_create_guest_lbr_event()
252 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
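A sketch of the perf_event_attr implied by the comment block above; the field names come from <linux/perf_event.h>, but treat the exact values as an assumption rather than the function's literal code.

	struct perf_event_attr attr = {
		.type			= PERF_TYPE_RAW,
		.size			= sizeof(attr),
		.config			= INTEL_FIXED_VLBR_EVENT,
		.pinned			= true,		/* reclaim by a cpu-pinned event sets event->oncpu to -1 */
		.exclude_host		= true,		/* record guest branches only */
		.sample_type		= PERF_SAMPLE_BRANCH_STACK,
		.branch_sample_type	= PERF_SAMPLE_BRANCH_CALL_STACK |
					  PERF_SAMPLE_BRANCH_USER,
	};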
257 * It's safe to access LBR msrs from guest when they have not
259 * the LBR msrs records when the guest LBR event is scheduled in.
265 u32 index = msr_info->index; in intel_pmu_handle_lbr_msrs_access() local
267 if (!intel_pmu_is_valid_lbr_msr(vcpu, index)) in intel_pmu_handle_lbr_msrs_access()
270 if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0) in intel_pmu_handle_lbr_msrs_access()
276 * host LBR value to be leaked to the guest. If LBR has been reclaimed, in intel_pmu_handle_lbr_msrs_access()
277 * return 0 on guest reads. in intel_pmu_handle_lbr_msrs_access()
280 if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { in intel_pmu_handle_lbr_msrs_access()
282 rdmsrl(index, msr_info->data); in intel_pmu_handle_lbr_msrs_access()
284 wrmsrl(index, msr_info->data); in intel_pmu_handle_lbr_msrs_access()
285 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); in intel_pmu_handle_lbr_msrs_access()
289 clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); in intel_pmu_handle_lbr_msrs_access()
294 msr_info->data = 0; in intel_pmu_handle_lbr_msrs_access()
302 u32 msr = msr_info->index; in intel_pmu_get_msr()
306 msr_info->data = pmu->fixed_ctr_ctrl; in intel_pmu_get_msr()
309 msr_info->data = pmu->pebs_enable; in intel_pmu_get_msr()
312 msr_info->data = pmu->ds_area; in intel_pmu_get_msr()
315 msr_info->data = pmu->pebs_data_cfg; in intel_pmu_get_msr()
321 msr_info->data = in intel_pmu_get_msr()
322 val & pmu->counter_bitmask[KVM_PMC_GP]; in intel_pmu_get_msr()
326 msr_info->data = in intel_pmu_get_msr()
327 val & pmu->counter_bitmask[KVM_PMC_FIXED]; in intel_pmu_get_msr()
330 msr_info->data = pmc->eventsel; in intel_pmu_get_msr()
345 u32 msr = msr_info->index; in intel_pmu_set_msr()
346 u64 data = msr_info->data; in intel_pmu_set_msr()
351 if (data & pmu->fixed_ctr_ctrl_rsvd) in intel_pmu_set_msr()
354 if (pmu->fixed_ctr_ctrl != data) in intel_pmu_set_msr()
358 if (data & pmu->pebs_enable_rsvd) in intel_pmu_set_msr()
361 if (pmu->pebs_enable != data) { in intel_pmu_set_msr()
362 diff = pmu->pebs_enable ^ data; in intel_pmu_set_msr()
363 pmu->pebs_enable = data; in intel_pmu_set_msr()
371 pmu->ds_area = data; in intel_pmu_set_msr()
374 if (data & pmu->pebs_data_cfg_rsvd) in intel_pmu_set_msr()
377 pmu->pebs_data_cfg = data; in intel_pmu_set_msr()
383 (data & ~pmu->counter_bitmask[KVM_PMC_GP])) in intel_pmu_set_msr()
386 if (!msr_info->host_initiated && in intel_pmu_set_msr()
395 reserved_bits = pmu->reserved_bits; in intel_pmu_set_msr()
396 if ((pmc->idx == 2) && in intel_pmu_set_msr()
397 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) in intel_pmu_set_msr()
402 if (data != pmc->eventsel) { in intel_pmu_set_msr()
403 pmc->eventsel = data; in intel_pmu_set_msr()
427 * Forcibly inlined to allow asserting on @index at build time, and there should
430 static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index) in intel_get_fixed_pmc_eventsel() argument
440 BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS); in intel_get_fixed_pmc_eventsel()
446 eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]); in intel_get_fixed_pmc_eventsel()
447 WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed); in intel_get_fixed_pmc_eventsel()
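The fixed_pmc_perf_ids[] table referenced above maps each fixed counter to a generic perf hardware event; a sketch of the assumed contents (array name here is illustrative, entries follow the SDM's fixed-counter definitions):

static const unsigned int fixed_pmc_perf_ids_sketch[] = {
	[0] = PERF_COUNT_HW_INSTRUCTIONS,	/* INST_RETIRED.ANY */
	[1] = PERF_COUNT_HW_CPU_CYCLES,		/* CPU_CLK_UNHALTED.CORE */
	[2] = PERF_COUNT_HW_REF_CPU_CYCLES,	/* CPU_CLK_UNHALTED.REF_TSC */
};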
451 static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits) in intel_pmu_enable_fixed_counter_bits() argument
455 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) in intel_pmu_enable_fixed_counter_bits()
456 pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits); in intel_pmu_enable_fixed_counter_bits()
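Each fixed counter owns a 4-bit control field in IA32_FIXED_CTR_CTRL, so the loop above clears one nibble per counter from the reserved-bit mask; a sketch of the shift assumed by intel_fixed_bits_by_idx():

/* Sketch, not the kernel macro: bits for fixed counter i live at [4*i+3 : 4*i]. */
#define fixed_ctrl_field_sketch(i, bits)	((u64)(bits) << ((i) * 4))

/* e.g. enabling OS+USR counting (0x3) for fixed counter 1 touches bits 5:4,
 * i.e. fixed_ctrl_field_sketch(1, 0x3) == 0x30. */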
469 memset(&lbr_desc->records, 0, sizeof(lbr_desc->records)); in intel_pmu_refresh()
472 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop, in intel_pmu_refresh()
476 if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm)) in intel_pmu_refresh()
483 eax.full = entry->eax; in intel_pmu_refresh()
484 edx.full = entry->edx; in intel_pmu_refresh()
486 pmu->version = eax.split.version_id; in intel_pmu_refresh()
487 if (!pmu->version) in intel_pmu_refresh()
490 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
494 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
497 pmu->available_event_types = ~entry->ebx & in intel_pmu_refresh()
498 ((1ull << eax.split.mask_length) - 1); in intel_pmu_refresh()
500 if (pmu->version == 1) { in intel_pmu_refresh()
501 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
503 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, in intel_pmu_refresh()
507 pmu->counter_bitmask[KVM_PMC_FIXED] = in intel_pmu_refresh()
508 ((u64)1 << edx.split.bit_width_fixed) - 1; in intel_pmu_refresh()
515 counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | in intel_pmu_refresh()
516 (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX)); in intel_pmu_refresh()
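A worked example for the reserved-bit mask computed above, assuming 8 GP counters, 3 fixed counters and KVM_FIXED_PMC_BASE_IDX == 32:

	/*
	 * counter_rsvd = ~(0xffULL | (0x7ULL << 32)) = 0xfffffff8ffffff00ULL,
	 * so only GLOBAL_CTRL bits 7:0 and 34:32 stay writable by the guest.
	 */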
517 pmu->global_ctrl_rsvd = counter_rsvd; in intel_pmu_refresh()
524 pmu->global_status_rsvd = pmu->global_ctrl_rsvd in intel_pmu_refresh()
528 pmu->global_status_rsvd &= in intel_pmu_refresh()
534 (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) { in intel_pmu_refresh()
535 pmu->reserved_bits ^= HSW_IN_TX; in intel_pmu_refresh()
536 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); in intel_pmu_refresh()
539 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
540 0, pmu->nr_arch_gp_counters); in intel_pmu_refresh()
541 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
542 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); in intel_pmu_refresh()
547 memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps)); in intel_pmu_refresh()
549 lbr_desc->records.nr = 0; in intel_pmu_refresh()
551 if (lbr_desc->records.nr) in intel_pmu_refresh()
552 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); in intel_pmu_refresh()
556 pmu->pebs_enable_rsvd = counter_rsvd; in intel_pmu_refresh()
557 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; in intel_pmu_refresh()
558 pmu->pebs_data_cfg_rsvd = ~0xff00000full; in intel_pmu_refresh()
561 pmu->pebs_enable_rsvd = in intel_pmu_refresh()
562 ~((1ull << pmu->nr_arch_gp_counters) - 1); in intel_pmu_refresh()
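For the same assumed configuration (8 GP + 3 fixed counters), the two PEBS cases above differ only in which counters may be enabled for PEBS:

	/*
	 * extended PEBS:     pebs_enable_rsvd == counter_rsvd -> bits 7:0 and 34:32 writable
	 * non-extended PEBS: pebs_enable_rsvd == ~0xffULL     -> only GP bits 7:0 writable
	 */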
574 pmu->gp_counters[i].type = KVM_PMC_GP; in intel_pmu_init()
575 pmu->gp_counters[i].vcpu = vcpu; in intel_pmu_init()
576 pmu->gp_counters[i].idx = i; in intel_pmu_init()
577 pmu->gp_counters[i].current_config = 0; in intel_pmu_init()
581 pmu->fixed_counters[i].type = KVM_PMC_FIXED; in intel_pmu_init()
582 pmu->fixed_counters[i].vcpu = vcpu; in intel_pmu_init()
583 pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX; in intel_pmu_init()
584 pmu->fixed_counters[i].current_config = 0; in intel_pmu_init()
585 pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i); in intel_pmu_init()
588 lbr_desc->records.nr = 0; in intel_pmu_init()
589 lbr_desc->event = NULL; in intel_pmu_init()
590 lbr_desc->msr_passthrough = false; in intel_pmu_init()
604 * Guest needs to re-enable LBR to resume branch recording.
618 u8 version = vcpu_to_pmu(vcpu)->version; in intel_pmu_deliver_pmi()
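The comment above refers to the legacy freeze-LBRs-on-PMI emulation; a hedged sketch of that behavior, assuming the guest's IA32_DEBUGCTL is read from the VMCS and its LBR enable bit is cleared when the freeze flag is set:

	u64 debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (debugctl & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		debugctl &= ~DEBUGCTLMSR_LBR;	/* guest must re-enable LBR to resume recording */
		vmcs_write64(GUEST_IA32_DEBUGCTL, debugctl);
	}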
632 for (i = 0; i < lbr->nr; i++) { in vmx_update_intercept_for_lbr_msrs()
633 vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set); in vmx_update_intercept_for_lbr_msrs()
634 vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set); in vmx_update_intercept_for_lbr_msrs()
635 if (lbr->info) in vmx_update_intercept_for_lbr_msrs()
636 vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set); in vmx_update_intercept_for_lbr_msrs()
647 if (!lbr_desc->msr_passthrough) in vmx_disable_lbr_msrs_passthrough()
651 lbr_desc->msr_passthrough = false; in vmx_disable_lbr_msrs_passthrough()
658 if (lbr_desc->msr_passthrough) in vmx_enable_lbr_msrs_passthrough()
662 lbr_desc->msr_passthrough = true; in vmx_enable_lbr_msrs_passthrough()
667 * pmu resources (e.g. LBR) that were assigned to the guest. This is
670 * Before entering the non-root mode (with irq disabled here), double
671 * confirm that the pmu features exposed to the guest are not reclaimed
680 if (!lbr_desc->event) { in vmx_passthrough_lbr_msrs()
684 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) in vmx_passthrough_lbr_msrs()
689 if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) { in vmx_passthrough_lbr_msrs()
691 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in vmx_passthrough_lbr_msrs()
699 pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id); in vmx_passthrough_lbr_msrs()
713 kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) { in intel_pmu_cross_mapped_check()
715 !pmc_is_globally_enabled(pmc) || !pmc->perf_event) in intel_pmu_cross_mapped_check()
719 * A negative index indicates the event isn't mapped to a in intel_pmu_cross_mapped_check()
722 hw_idx = pmc->perf_event->hw.idx; in intel_pmu_cross_mapped_check()
723 if (hw_idx != pmc->idx && hw_idx > -1) in intel_pmu_cross_mapped_check()
724 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx); in intel_pmu_cross_mapped_check()