Searched refs: vcpu_to_pmu (Results 1 – 10 of 10) sorted by relevance
78 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_check_rdpmc_early()
90 return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx); in amd_rdpmc_ecx_to_pmc()
95 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_msr_idx_to_pmc()
106 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_is_valid_msr()
129 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_get_msr()
151 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_set_msr()
178 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_refresh()
217 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_init()
62 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_rdpmc_ecx_to_pmc()
152 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr()
183 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc()
200 vcpu_to_pmu(vcpu)->event_count--; in intel_pmu_release_guest_lbr_event()
207 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_create_guest_lbr_event()
285 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); in intel_pmu_handle_lbr_msrs_access()
289 clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); in intel_pmu_handle_lbr_msrs_access()
300 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr()
343 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr()
461 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_refresh()
[all …]
2755 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) && in prepare_vmcs02()
3032 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), in nested_vmx_check_host_state()
3151 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), in nested_vmx_check_guest_state()
4714 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu))) in load_vmcs12_host_state()
2461 if (data && !vcpu_to_pmu(vcpu)->version) in vmx_set_msr()
7180 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu); in atomic_switch_perf_msrs()
203 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in pmu_fw_ctr_read_hi()
233 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in pmu_ctr_read()
274 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_pmu_overflow()
343 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_incr_fw()
360 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_read_hpm()
393 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_pmu_clear_snapshot_area()
404 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
458 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_num_ctrs()
468 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_ctr_info()
484 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_riscv_vcpu_pmu_ctr_start()
[all …]
21 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_sbi_ext_pmu_handler()
85 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); in kvm_sbi_ext_pmu_probe()
498 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_handle_event()
584 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_rdpmc()
621 return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)); in kvm_pmu_is_valid_msr()
631 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_mark_pmc_in_use()
640 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_get_msr()
665 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_set_msr()
724 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_reset()
753 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_refresh()
796 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_init()
806 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_cleanup()
[all …]
9 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu) (macro definition)
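Nearly every hit above opens with the same line: the handler derives a pointer to the vCPU's PMU state before doing anything else. Below is a minimal sketch of how the x86 definition expands at such a call site. The version and event_count members do appear in the hits above; the pared-down struct layout and the example handler are illustrative only, not the real kernel layout.

/*
 * Minimal sketch of the x86 expansion; real structs carry many more fields.
 */
typedef unsigned long long u64;   /* stand-in for the kernel typedef */

struct kvm_pmu {
	unsigned int version;     /* tested via vcpu_to_pmu(vcpu)->version in vmx_set_msr() */
	u64 event_count;          /* decremented in intel_pmu_release_guest_lbr_event() */
};

struct kvm_vcpu_arch {
	struct kvm_pmu pmu;       /* PMU state embedded directly in per-vCPU arch state */
};

struct kvm_vcpu {
	struct kvm_vcpu_arch arch;
};

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)

/* Hypothetical handler showing the call-site shape repeated in the hits: */
static void example_pmu_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);   /* == &vcpu->arch.pmu: no allocation, never NULL */

	if (!pmu->version)        /* same guard vmx_set_msr() applies at line 2461 above */
		return;
	pmu->event_count++;
}

Because the macro expands to the address of an embedded field, callers get a valid pointer for free, which is why none of the call sites above check for NULL.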
4985 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_arch_vcpu_load()
63 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context) (macro definition)
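The two macro definitions resolve the same name to different backing fields: x86 embeds the PMU as arch.pmu, while RISC-V names its field arch.pmu_context. Each port compiles against its own expansion, so the call sites keep an identical shape across architectures. A companion sketch of the RISC-V side follows; only the field name comes from the definition above, and the struct members are hypothetical stand-ins.

/*
 * Separate sketch for the RISC-V expansion; struct layouts are stand-ins,
 * only the arch.pmu_context field name comes from the definition above.
 */
struct kvm_pmu {
	int num_ctrs;                  /* hypothetical member for illustration */
};

struct kvm_vcpu_arch {
	struct kvm_pmu pmu_context;    /* same idiom as x86, different field name */
};

struct kvm_vcpu {
	struct kvm_vcpu_arch arch;
};

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)

/* Call sites match the x86 shape, cf. kvm_riscv_vcpu_pmu_num_ctrs() above: */
static int example_num_ctrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

	return kvpmu->num_ctrs;
}

Keeping the accessor behind a macro rather than spelling out the field lets either port rename or restructure its PMU state without touching the dozens of call sites listed here.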