Lines Matching +full:guest +full:- +full:index +full:- +full:bits

1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Kernel-based Virtual Machine driver for Linux
31 #include <asm/pvclock-abi.h>
34 #include <asm/msr-index.h>
38 #include <asm/hyperv-tlfs.h>
81 /* x86-specific vcpu->requests bit members */
150 #define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
151 #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
154 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
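A quick worked expansion of the hugepage macros above. KVM_HPAGE_SHIFT() and KVM_HPAGE_SIZE() are not among the matched lines; the standard definitions built on PAGE_SHIFT are assumed here:

#include <stdio.h>

#define PG_LEVEL_4K 1
#define PAGE_SHIFT 12
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))

int main(void)
{
	/* level 1: 4KiB, level 2: 2MiB, level 3: 1GiB */
	for (int level = PG_LEVEL_4K; level <= 3; level++)
		printf("level %d: size 0x%lx mask 0x%lx\n",
		       level, KVM_HPAGE_SIZE(level), KVM_HPAGE_MASK(level));
	return 0;
}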
233 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
234 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
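A minimal standalone sketch of the encoding, assuming the standard x86 constant values (DR6_FIXED_1 = 0xfffe0ff0, with bit 16/RTM being the lone active-low bit that is not also fixed to 1): an active-high #DB payload becomes a DR6 value via a single XOR against DR6_ACTIVE_LOW, so asserted active-low bits end up 0 and deasserted ones end up 1.

#include <stdint.h>
#include <stdio.h>

#define DR6_FIXED_1    0xfffe0ff0ULL
#define DR6_ACTIVE_LOW 0xffff0ff0ULL   /* DR6_FIXED_1 | DR6_RTM (bit 16) */

int main(void)
{
	uint64_t payload = 1ULL << 0;   /* e.g. breakpoint 0 hit (DR6.B0) */

	/* active-low bits asserted in the payload become 0 in DR6;
	 * all other fixed-1/active-low bits become 1 */
	uint64_t dr6 = payload ^ DR6_ACTIVE_LOW;

	printf("DR6 = 0x%llx\n", (unsigned long long)dr6);
	return 0;
}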
274 * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP checks
279 * PRIVATE_ACCESS is a KVM-defined flag used to indicate that a fault occurred
280 * when the guest was accessing private memory.
285 /* apic attention bits */
288 * The following bit is set with PV-EOI, unset on EOI.
289 * We detect PV-EOI changes by the guest by comparing
290 * this bit with the PV-EOI flag in guest memory.
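A simplified userspace model of the detection scheme these lines describe (variable names are illustrative, not the kernel's): KVM caches the pending bit when it arms PV-EOI, and later compares it with the flag in guest memory; a mismatch means the guest cleared the flag, i.e. performed an EOI.

#include <stdbool.h>
#include <stdio.h>

static bool cached_pv_eoi;     /* the bit KVM caches in apic attention */
static bool guest_pv_eoi_mem;  /* the PV-EOI flag in guest memory */

static void arm_pv_eoi(void)
{
	guest_pv_eoi_mem = true;   /* tell the guest an EOI is pending */
	cached_pv_eoi = true;      /* remember that we set it */
}

static bool guest_did_pv_eoi(void)
{
	/* mismatch: the guest cleared the flag, i.e. performed EOI */
	return cached_pv_eoi && !guest_pv_eoi_mem;
}

int main(void)
{
	arm_pv_eoi();
	guest_pv_eoi_mem = false;  /* guest EOIs by clearing the flag */
	printf("guest EOI detected: %d\n", guest_did_pv_eoi());
	return 0;
}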
304 * Upper-level shadow pages having gptes are tracked for write-protection via
306 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
311 * incorporates various mode bits and properties of the SP. Roughly speaking,
313 * is the number of bits that are used to compute the role.
315 * But, even though there are 19 bits in the mask below, not all combinations
318 * - invalid shadow pages are not accounted, so the bits are effectively 18
320 * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
322 * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
324 * - the 4 bits of level are effectively limited to the values 2/3/4/5,
325 * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
329 * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
330 * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
332 * Therefore, the maximum number of possible upper-level shadow pages for a
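In the full file the comment concludes that the count is a bit less than 2^13 per gfn. One way to tally that bound from the constraints listed above, sketched below; the split of the remaining six free role bits is an assumption:

#include <stdio.h>

int main(void)
{
	int wp_combos  = 5;   /* cr0_wp x smep_andnot_wp x smap_andnot_wp */
	int pae_combos = 4;   /* has_4_byte_gpte=1: quadrant only, level redundant */
	int lm_combos  = 16;  /* has_4_byte_gpte=0: 4 levels x execonly x ad_disabled */
	int free_bits  = 6;   /* direct, 3 access bits, efer_nx, guest_mode (assumed) */

	printf("%d possible roles per gfn (< 2^13 = 8192)\n",
	       wp_combos * (pae_combos + lm_combos) * (1 << free_bits));
	return 0;
}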
367 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
368 * don't treat an all-zero structure as valid data.
373 * CR4.PKE only affects permission checks for software walks of the guest page
378 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
435 #define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
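Expanding the mask above, assuming KVM_MMU_NUM_PREV_ROOTS is 3 as in current kernels: BIT(4) - 1 = 0xf, one bit for the current root plus one per cached previous root.

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define KVM_MMU_NUM_PREV_ROOTS 3   /* value in current kernels (assumed here) */
#define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)

int main(void)
{
	/* 0xf: bit 0 = current root, bits 1..3 = the three previous roots */
	printf("KVM_MMU_ROOTS_ALL = 0x%lx\n", KVM_MMU_ROOTS_ALL);
	return 0;
}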
443 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
444 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
449 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
464 * consists of 16 domains indexed by page fault error code bits [4:1],
466 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
474 * Byte index: page fault error code [4:1]
475 * Bit index: pte permissions in ACC_* format
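A standalone model of the two lookup tables these comments describe. It is simplified: it ignores the SMEP/SMAP folding the real permission_fault() helper also performs, and follows the kernel's convention that a set bit in the permissions table means the access faults.

#include <stdbool.h>
#include <stdint.h>

static uint8_t  permissions[16]; /* byte per pfec[4:1]; bit per ACC_* value; set = fault */
static uint32_t pkru_mask;       /* sixteen 2-bit AD/WD domains */

static bool would_fault(uint32_t pfec, unsigned pte_access,
			uint32_t pkru, unsigned pte_pkey)
{
	unsigned index = (pfec >> 1) & 0xf;  /* error code bits [4:1] */
	bool perm_fault = (permissions[index] >> pte_access) & 1;

	/* AD/WD for this pte's key, masked by the per-domain bits */
	uint32_t pkru_bits = (pkru >> (pte_pkey * 2)) & 3;
	pkru_bits &= (pkru_mask >> (index * 2)) & 3;

	return perm_fault || pkru_bits;
}

int main(void)
{
	/* all-zero tables: nothing faults */
	return would_fault(0, 0, 0, 0);  /* 0 */
}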
484 * check zero bits on shadow page table entries; these
485 * bits include not only hardware-reserved bits but also
486 * bits the SPTE never uses.
510 * guest or userspace.
513 * doesn't need to reprogram the perf_event every time the guest writes
563 * Overlay the bitmap with a 64-bit atomic so that all bits can be
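A standalone C11 model of the overlay in the fragment above (the kernel uses an anonymous union of DECLARE_BITMAP() and atomic64_t): bits are set per counter, and the whole bitmap can be consumed in one atomic exchange.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union reprogram_bitmap {
	uint64_t bits;                 /* per-counter view: bit N = counter N */
	_Atomic uint64_t atomic_view;  /* whole-bitmap atomic view */
};

int main(void)
{
	union reprogram_bitmap b = { .bits = 0 };

	atomic_fetch_or(&b.atomic_view, 1ULL << 3);  /* mark counter 3 */
	/* consume every pending bit at once */
	uint64_t pending = atomic_exchange(&b.atomic_view, 0);
	printf("pending = 0x%llx\n", (unsigned long long)pending);
	return 0;
}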
581 * If a guest counter is cross-mapped to a host counter with a different
582 * index, its PEBS capability will be temporarily disabled.
597 * redundant check before cleanup if the guest doesn't use vPMU at all.
617 /* Hyper-V SynIC timer */
620 int index;
628 /* Hyper-V synthetic interrupt controller (SynIC) */
652 #define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1)
665 /* Hyper-V per vcpu emulation context */
722 u64 timer_expires; /* In guest epoch */
783 * If the vcpu runs in guest mode with two-level paging, this still saves
784 * the paging mode of the L1 guest. This context is always used to
789 /* Non-nested MMU for L1 */
796 * Paging state of an L2 guest (used for nested npt)
799 * of an L2 guest. This context is only initialized for page table
817 * QEMU userspace and the guest each have their own FPU state.
818 * In vcpu_run, we switch between the user and guest FPU contexts.
819 * While running a VCPU, the VCPU thread will have the guest FPU
824 * "guest_fpstate" state here contains the guest FPU context, with the
825 * host PKRU bits.
841 /* Exceptions to be injected to the guest. */
843 /* Exception VM-Exits to be synthesized to L1. */
868 * Track whether or not the guest is allowed to use features that are
871 * not always, governed features can be used by the guest if and only
872 * if both KVM and userspace want to expose the feature to the guest.
892 /* set guest stopped flag in pvclock flags field */
954 /* used for guest single stepping over the given code position */
1004 /* be preempted when it's in kernel mode (CPL=0) */
1010 /* Host CPU on which VM-entry was most recently attempted */
1026 * are not present in the guest's cpuid
1036 * reading the guest memory
1051 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1058 * software-enabled local APICs to be in the same mode, each addressable APIC to
1088 /* Hyper-V synthetic debugger (SynDbg) */
1100 /* Current state of Hyper-V TSC page clocksource */
1104 /* TSC page MSR was written by the guest, update pending */
1115 /* Hyper-V emulation context */
1123 /* Hyper-V based guest crash (NT kernel bugcheck) parameters */
1136 /* How many vCPUs have VP index != vCPU index */
1210 * being used by a Hyper-V guest.
1234 * first time either APIC ID or APIC base are changed by the guest
1245 * AVIC is inhibited on a vCPU because it runs a nested guest.
1261 * PIT (i8254) 're-inject' mode, relies on EOI intercept,
1318 * guest attempts to execute from the region then KVM obviously can't
1319 * create an NX huge page (without hanging the guest).
1368 * preemption-disabled region, so it must be a raw spinlock.
1430 * If exit_on_emulation_error is set, and the in-kernel instruction
1442 /* Guest can access the SGX PROVISIONKEY. */
1474 * - tdp_mmu_roots (above)
1475 * - the link field of kvm_mmu_page structs used by the TDP MMU
1476 * - possible_nx_huge_pages;
1477 * - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1509 * VM-scope maximum vCPU ID. Used to determine the size of structures
1522 * Protected by kvm->slots_lock.
1534 * Protected by kvm->slots_lock.
1605 u32 index;
1635 bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1689 * Does not need to flush GPA->HPA mappings.
1690 * Can potentially get non-canonical addresses through INVLPGs, which
1696 * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
1697 * does not need to flush GPA->HPA mappings.
1884 DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
1887 #include <asm/kvm-x86-ops.h>
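A userspace sketch of the X-macro pattern these two lines rely on: <asm/kvm-x86-ops.h> invokes KVM_X86_OP() once per vendor hook, and each includer defines the macro to stamp out whatever it needs (struct members, static-call declarations, and so on). The stand-in header and hook list below are illustrative, not the real header.

#include <stdio.h>

/* stand-in for <asm/kvm-x86-ops.h>: one KVM_X86_OP() per vendor hook */
#define FOR_EACH_KVM_X86_OP(OP) \
	OP(vcpu_run)            \
	OP(flush_tlb_all)

/* first expansion: declare one function pointer per hook */
struct kvm_x86_ops {
#define KVM_X86_OP(func) void (*func)(void);
	FOR_EACH_KVM_X86_OP(KVM_X86_OP)
#undef KVM_X86_OP
};

int main(void)
{
	/* second expansion: list every hook name */
#define KVM_X86_OP(func) printf("hook: %s\n", #func);
	FOR_EACH_KVM_X86_OP(KVM_X86_OP)
#undef KVM_X86_OP
	return 0;
}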
1909 return -ENOTSUPP;	/* in kvm_arch_flush_remote_tlbs() */
1917 return -EOPNOTSUPP;	/* in kvm_arch_flush_remote_tlbs_range() */
1924 /* Values are arbitrary, but must be non-zero. */
1931 ((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
1932 (!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
1989 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
1994 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
1999 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
2004 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
2008 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
2015 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
2020 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
2023 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
2024 * state and inject single-step #DBs after skipping
2027 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
2036 * If emulation fails for a write to guest page tables,
2038 * gfn and resumes the guest to retry the non-emulatable
2040 * doesn't allow forward progress for a self-changing
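A standalone model of how these flags combine. The bit values are assumptions mirroring the order in kvm_host.h; the pairing rule is the one documented above for EMULTYPE_ALLOW_RETRY_PF, which is only meaningful together with EMULTYPE_PF.

#include <stdbool.h>
#include <stdio.h>

/* illustrative values; the real definitions live in kvm_host.h */
#define EMULTYPE_NO_DECODE       (1 << 0)
#define EMULTYPE_ALLOW_RETRY_PF  (1 << 3)
#define EMULTYPE_PF              (1 << 6)

static bool emul_is_mmio_retry(int emulation_type)
{
	/* retrying is only legal for #PF-induced emulation */
	return (emulation_type & (EMULTYPE_PF | EMULTYPE_ALLOW_RETRY_PF)) ==
	       (EMULTYPE_PF | EMULTYPE_ALLOW_RETRY_PF);
}

int main(void)
{
	int type = EMULTYPE_PF | EMULTYPE_ALLOW_RETRY_PF;
	printf("retry allowed: %d\n", emul_is_mmio_retry(type));
	return 0;
}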
2065 int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2066 int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
2067 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
2068 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2069 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
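A standalone model of the host_initiated distinction carried by __kvm_get_msr() above: host-initiated accesses (userspace ioctls) may read MSRs the guest's CPUID does not expose, while the same access from the guest must fail. Names below are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool guest_cpuid_has_feature;   /* pretend CPUID bit gating the MSR */

static int model_get_msr(uint32_t index, uint64_t *data, bool host_initiated)
{
	(void)index;
	if (!host_initiated && !guest_cpuid_has_feature)
		return 1;                  /* guest access faults */
	*data = 0;                         /* host access always works */
	return 0;
}

int main(void)
{
	uint64_t data;
	printf("guest: %d, host: %d\n",
	       model_get_msr(0, &data, false), model_get_msr(0, &data, true));
	return 0;
}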
2202 #define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
2207 #define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
2250 #define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */
2259 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
2279 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
2325 return (irq->delivery_mode == APIC_DM_FIXED ||	/* in kvm_irq_is_postable() */
2326 	irq->delivery_mode == APIC_DM_LOWEST);
2367 * remaining 31 lower bits must be 0 to preserve ABI.