/linux-6.12.1/arch/x86/kvm/svm/

D | nested.c |
      62  u64 cr3 = svm->nested.ctl.nested_cr3;   in nested_svm_get_tdp_pdptr()
      81  return svm->nested.ctl.nested_cr3;   in nested_svm_get_tdp_cr3()
      99  svm->nested.ctl.nested_cr3);   in nested_svm_init_mmu_context()
     120  if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))   in nested_vmcb_needs_vls_intercept()
     139  g = &svm->nested.ctl;   in recalc_intercepts()
     205  if (!svm->nested.force_msr_bitmap_recalc) {   in nested_svm_vmrun_msrpm()
     206  struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;   in nested_svm_vmrun_msrpm()
     210  (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))   in nested_svm_vmrun_msrpm()
     215  if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))   in nested_svm_vmrun_msrpm()
     231  offset = svm->nested.ctl.msrpm_base_pa + (p * 4);   in nested_svm_vmrun_msrpm()
     [all …]

D | hyperv.h |
      18  struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;   in nested_svm_hv_update_vm_vp_ids()
      24  hv_vcpu->nested.pa_page_gpa = hve->partition_assist_page;   in nested_svm_hv_update_vm_vp_ids()
      25  hv_vcpu->nested.vm_id = hve->hv_vm_id;   in nested_svm_hv_update_vm_vp_ids()
      26  hv_vcpu->nested.vp_id = hve->hv_vp_id;   in nested_svm_hv_update_vm_vp_ids()
      32  struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;   in nested_svm_l2_tlb_flush_enabled()

D | svm.h |
     273  struct svm_nested_state nested;   member
     506  (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);   in nested_vgif_enabled()
     515  return svm->nested.vmcb02.ptr;   in get_vgif_vmcb()
     552  return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;   in nested_npt_enabled()
     558  (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);   in nested_vnmi_enabled()
     631  return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);   in nested_svm_virtualize_tpr()
     636  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);   in nested_exit_on_smi()
     641  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);   in nested_exit_on_intr()
     646  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);   in nested_exit_on_nmi()

/linux-6.12.1/arch/x86/kvm/vmx/

D | nested.c |
     184  to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;   in nested_vmx_failValid()
     197  if (vmx->nested.current_vmptr == INVALID_GPA &&   in nested_vmx_fail()
     225  vmx->nested.need_vmcs12_to_shadow_sync = false;   in vmx_disable_shadow_vmcs()
     235  kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);   in nested_release_evmcs()
     236  vmx->nested.hv_evmcs = NULL;   in nested_release_evmcs()
     239  vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;   in nested_release_evmcs()
     242  hv_vcpu->nested.pa_page_gpa = INVALID_GPA;   in nested_release_evmcs()
     243  hv_vcpu->nested.vm_id = 0;   in nested_release_evmcs()
     244  hv_vcpu->nested.vp_id = 0;   in nested_release_evmcs()
     267  if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)   in nested_evmcs_handle_vmclear()
     [all …]

D | nested.h |
      45  return to_vmx(vcpu)->nested.cached_vmcs12;   in get_vmcs12()
      53  return to_vmx(vcpu)->nested.cached_shadow_vmcs12;   in get_shadow_vmcs12()
      66  return vmx->nested.current_vmptr != -1ull ||   in vmx_has_valid_vmcs12()
      74  return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;   in nested_get_vpid02()
     107  return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);   in nested_cpu_vmx_misc_cr3_count()
     117  return to_vmx(vcpu)->nested.msrs.misc_low &   in nested_cpu_has_vmwrite_any_field()
     123  return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;   in nested_cpu_has_zero_length_injection()
     128  return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &   in nested_cpu_supports_monitor_trap_flag()
     134  return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &   in nested_cpu_has_vmx_shadow_vmcs()
     265  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;   in nested_guest_cr0_valid()
     [all …]

D | hyperv.h |
      27  return evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);   in nested_vmx_is_evmptr12_valid()
      37  return evmptr_is_set(vmx->nested.hv_evmcs_vmptr);   in nested_vmx_is_evmptr12_set()
      42  return vmx->nested.hv_evmcs;   in nested_vmx_evmcs()
      52  to_vmx(vcpu)->nested.enlightened_vmcs_enabled;   in guest_cpuid_has_evmcs()

D | hyperv.c |
      41  (!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))   in nested_get_evmcs_version()
     203  vmx->nested.enlightened_vmcs_enabled = true;   in nested_enable_evmcs()
     215  struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;   in nested_evmcs_l2_tlb_flush_enabled()

D | vmx.c |
     123  static bool __read_mostly nested = 1;   variable
     124  module_param(nested, bool, 0444);
    1310  if (vmx->nested.need_vmcs12_to_shadow_sync)   in vmx_prepare_switch_to_guest()
    1804  vmx->nested.mtf_pending = true;   in vmx_update_emulated_instruction()
    1807  vmx->nested.mtf_pending = false;   in vmx_update_emulated_instruction()
    2003  if (!nested)   in vmx_get_feature_msr()
    2005  return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);   in vmx_get_feature_msr()
    2090  if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,   in vmx_get_msr()
    2293  ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||   in vmx_set_msr()
    2294  (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))   in vmx_set_msr()
     [all …]

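The first two vmx.c hits declare the module parameter that the KVM documentation entries further down refer to (passing "nested=1" to kvm-intel). As a minimal sketch of that general kernel idiom, not the KVM code itself, here is a hypothetical demo module (the module name, parameter description, and log message are invented) exposing a read-only boolean parameter:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/printk.h>

    /*
     * Hypothetical demo: "nested" becomes readable at
     * /sys/module/<modname>/parameters/nested and can be set at load
     * time, e.g. "modprobe <modname> nested=0".
     */
    static bool __read_mostly nested = true;
    module_param(nested, bool, 0444);
    MODULE_PARM_DESC(nested, "Enable the (hypothetical) nested feature");

    static int __init nested_param_demo_init(void)
    {
            pr_info("nested_param_demo: nested is %s\n", nested ? "on" : "off");
            return 0;
    }

    static void __exit nested_param_demo_exit(void)
    {
    }

    module_init(nested_param_demo_init);
    module_exit(nested_param_demo_exit);
    MODULE_LICENSE("GPL");
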
/linux-6.12.1/Documentation/virt/kvm/x86/

D | running-nested-guests.rst |
       4  Running nested guests with KVM
       7  A nested guest is the ability to run a guest inside another guest (it
      36  - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
      44  resulting in at least four levels in a nested setup — L0 (bare
      46  (guest hypervisor), L3 (nested guest).
      56  There are several scenarios where nested KVM can be useful, to name a
      61  Provider, using nested KVM lets you rent a large enough "guest
      63  multiple nested guests (level-2 guests), running different OSes, on
      66  - Live migration of "guest hypervisors" and their nested guests, for
      76  Enabling "nested" (x86)
     [all …]

D | index.rst |
      16  nested-vmx
      17  running-nested-guests

D | nested-vmx.rst |
      16  hypervisors (which use VMX) with their own nested guests. It does so by
      20  We describe in much greater detail the theory behind the nested VMX feature,
      32  In nested virtualization, we have three levels: The host (KVM), which we call
      33  L0, the guest hypervisor, which we call L1, and its nested guest, which we
      37  Running nested VMX
      40  The nested VMX feature is enabled by default since Linux kernel v4.20. For
      41  older Linux kernel, it can be enabled by giving the "nested=1" option to the
      65  As a VMX implementation, nested VMX presents a VMCS structure to L1.

/linux-6.12.1/rust/macros/

D | zeroable.rs |
      24  let mut nested = 0;   in derive() local variable
      28  TokenTree::Punct(p) if nested == 0 && p.as_char() == ',' => {   in derive()
      37  TokenTree::Punct(p) if nested == 0 && p.as_char() == '\'' => {   in derive()
      41  TokenTree::Punct(p) if nested == 0 && p.as_char() == ':' => {   in derive()
      49  nested += 1;   in derive()
      53  assert!(nested > 0);   in derive()
      54  nested -= 1;   in derive()
      60  assert_eq!(nested, 0);   in derive()

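The zeroable.rs hits keep a nesting counter while walking a derive macro's token stream so that commas inside brackets are not treated as top-level separators. Purely as an illustration of that counting technique, here is a small standalone C sketch over a plain string; the function split_top_level() and its example input are invented for this listing and are not part of the kernel's Rust macro:

    #include <stdio.h>

    /*
     * Print the top-level, comma-separated items of a string: commas that
     * sit inside <...>, (...) or [...] belong to a nested item and are
     * skipped. Assumes well-formed (balanced) input.
     */
    static void split_top_level(const char *s)
    {
            int nested = 0;                 /* current bracket nesting depth */
            const char *start = s;

            for (const char *p = s; ; p++) {
                    char c = *p;

                    if (c == '<' || c == '(' || c == '[')
                            nested++;
                    else if (c == '>' || c == ')' || c == ']')
                            nested--;

                    if ((c == ',' && nested == 0) || c == '\0') {
                            printf("item: %.*s\n", (int)(p - start), start);
                            if (c == '\0')
                                    break;
                            start = p + 1;
                    }
            }
    }

    int main(void)
    {
            /* Loosely resembles a generic parameter list. */
            split_top_level("K, V: Map<K, Vec<(K, V)>>, A: [u8; 16]");
            return 0;
    }
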
/linux-6.12.1/Documentation/networking/devlink/

D | index.rst |
      33  lock of both nested and parent instances at the same time, devlink
      35  instance lock of the nested instance could be taken.
      37  nested relationship:
      39  - ``devl_nested_devlink_set()`` - called to setup devlink -> nested
      40  devlink relationship (could be user for multiple nested instances.
      42  nested devlink relationship.
      44  nested devlink relationship.
      46  The nested devlink info is exposed to the userspace over object-specific

/linux-6.12.1/Documentation/networking/

D | ethtool-netlink.rst |
      53  Each request or reply message contains a nested attribute with common header.
      96  type is used. For arbitrary length bitmaps, ethtool netlink uses a nested
     116  Compact form: nested (bitset) attribute contents:
     141  Bit-by-bit form: nested (bitset) attribute contents:
     148  | ``ETHTOOL_A_BITSET_BITS`` | nested | array of bits |
     150  | | ``ETHTOOL_A_BITSET_BITS_BIT+`` | nested | one bit |
     332  | ``ETHTOOL_A_STRSET_HEADER`` | nested | request header |
     334  | ``ETHTOOL_A_STRSET_STRINGSETS`` | nested | string set to request |
     336  | | ``ETHTOOL_A_STRINGSETS_STRINGSET+`` | nested | one string set |
     344  | ``ETHTOOL_A_STRSET_HEADER`` | nested | reply header |
     [all …]

/linux-6.12.1/tools/perf/bench/

D | epoll-ctl.c |
      57  static unsigned int nested = 0;   variable
      79  …OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)…
     105  if (nested > EPOLL_MAXNESTS)   in nest_epollfd()
     106  nested = EPOLL_MAXNESTS;   in nest_epollfd()
     107  printinfo("Nesting level(s): %d\n", nested);   in nest_epollfd()
     109  epollfdp = calloc(nested, sizeof(int));   in nest_epollfd()
     113  for (i = 0; i < nested; i++) {   in nest_epollfd()
     122  for (i = nested - 1; i; i--) {   in nest_epollfd()
     345  if (nested)   in bench_epoll_ctl()

D | epoll-wait.c |
     105  static unsigned int nested = 0;   variable
     138  …OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)…
     246  if (nested > EPOLL_MAXNESTS)   in nest_epollfd()
     247  nested = EPOLL_MAXNESTS;   in nest_epollfd()
     249  epollfdp = calloc(nested, sizeof(*epollfdp));   in nest_epollfd()
     253  for (i = 0; i < nested; i++) {   in nest_epollfd()
     262  for (i = nested - 1; i; i--) {   in nest_epollfd()
     325  if (nested)   in do_threads()
     460  if (nested)   in bench_epoll_wait()
     465  printinfo("Nesting level(s): %d\n", nested);   in bench_epoll_wait()

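Both benchmark files build a chain of epoll instances in nest_epollfd() so that wakeups have to traverse several nesting levels. A minimal standalone sketch of that chaining idea follows; the array name, the DEPTH constant, and the chaining direction are invented here and this is not the perf bench code itself:

    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/epoll.h>
    #include <unistd.h>

    #define DEPTH 4                         /* arbitrary nesting depth */

    int main(void)
    {
            int epfd[DEPTH];
            struct epoll_event ev = { .events = EPOLLIN | EPOLLHUP };

            /* One epoll instance per nesting level. */
            for (int i = 0; i < DEPTH; i++) {
                    epfd[i] = epoll_create1(0);
                    if (epfd[i] < 0)
                            err(EXIT_FAILURE, "epoll_create1");
            }

            /*
             * Chain them: each inner epoll fd is registered with the next
             * outer one, so readiness must bubble up through every level.
             * The kernel limits how deep such chains may go.
             */
            for (int i = DEPTH - 1; i > 0; i--) {
                    ev.data.fd = epfd[i];
                    if (epoll_ctl(epfd[i - 1], EPOLL_CTL_ADD, epfd[i], &ev) < 0)
                            err(EXIT_FAILURE, "epoll_ctl");
            }

            printf("built a chain of %d nested epoll instances\n", DEPTH);

            for (int i = 0; i < DEPTH; i++)
                    close(epfd[i]);
            return 0;
    }
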
/linux-6.12.1/net/netfilter/ipset/

D | ip_set_bitmap_gen.h |
      95  struct nlattr *nested;   in mtype_head() local
      98  nested = nla_nest_start(skb, IPSET_ATTR_DATA);   in mtype_head()
      99  if (!nested)   in mtype_head()
     108  nla_nest_end(skb, nested);   in mtype_head()
     207  struct nlattr *adt, *nested;   in mtype_list() local
     229  nested = nla_nest_start(skb, IPSET_ATTR_DATA);   in mtype_list()
     230  if (!nested) {   in mtype_list()
     243  nla_nest_end(skb, nested);   in mtype_list()
     253  nla_nest_cancel(skb, nested);   in mtype_list()

D | ip_set_list_set.c |
     457  struct nlattr *nested;   in list_set_head() local
     460  nested = nla_nest_start(skb, IPSET_ATTR_DATA);   in list_set_head()
     461  if (!nested)   in list_set_head()
     470  nla_nest_end(skb, nested);   in list_set_head()
     482  struct nlattr *atd, *nested;   in list_set_list() local
     500  nested = nla_nest_start(skb, IPSET_ATTR_DATA);   in list_set_list()
     501  if (!nested)
     508  nla_nest_end(skb, nested);   in list_set_list()
     518  nla_nest_cancel(skb, nested);   in list_set_list()

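Both ipset files follow the common netlink dump pattern: open a nested attribute with nla_nest_start(), emit its members, then either seal it with nla_nest_end() or back everything out with nla_nest_cancel() when the skb runs out of room. A hedged in-kernel sketch of that pattern follows; the DEMO_ATTR_* attribute IDs and demo_fill_data() are made up for illustration and this is not the ipset code itself:

    #include <net/netlink.h>

    /* Hypothetical attribute IDs, for illustration only. */
    enum {
            DEMO_ATTR_UNSPEC,
            DEMO_ATTR_DATA,         /* nested container */
            DEMO_ATTR_COUNT,        /* u32 inside the container */
            DEMO_ATTR_TIMEOUT,      /* u32 inside the container */
    };

    static int demo_fill_data(struct sk_buff *skb, u32 count, u32 timeout)
    {
            struct nlattr *nested;

            nested = nla_nest_start(skb, DEMO_ATTR_DATA);
            if (!nested)
                    return -EMSGSIZE;

            if (nla_put_u32(skb, DEMO_ATTR_COUNT, count) ||
                nla_put_u32(skb, DEMO_ATTR_TIMEOUT, timeout))
                    goto nla_put_failure;

            nla_nest_end(skb, nested);
            return 0;

    nla_put_failure:
            /* Roll back everything added since nla_nest_start(). */
            nla_nest_cancel(skb, nested);
            return -EMSGSIZE;
    }
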
/linux-6.12.1/arch/riscv/kernel/

D | kernel_mode_vector.c |
     190  #define riscv_v_start_kernel_context(nested) (-ENOENT)   argument
     209  bool nested = false;   in kernel_vector_begin() local
     216  if (riscv_v_start_kernel_context(&nested)) {   in kernel_vector_begin()
     221  if (!nested)   in kernel_vector_begin()

/linux-6.12.1/scripts/coccinelle/locks/

D | flags.cocci |
       2  /// Find nested lock+irqsave functions that use the same flags variables
      73  cocci.print_secs("nested lock+irqsave that reuses flags",p2)
      80  msg="ERROR: nested lock+irqsave that reuses flags from line %s." % (p1[0].line)

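flags.cocci reports call sites where two spin_lock_irqsave() calls nest but share a single flags variable, so the inner call overwrites the interrupt state saved by the outer one and the final unlock restores the wrong state. A hypothetical C fragment of the kind of code the rule is meant to catch, followed by one way to fix it (struct demo_dev and both functions are invented for this listing):

    #include <linux/spinlock.h>

    struct demo_dev {
            spinlock_t a_lock;
            spinlock_t b_lock;
    };

    /*
     * Buggy pattern: the second spin_lock_irqsave() clobbers the IRQ state
     * saved by the first, so the outer unlock restores the wrong flags.
     */
    static void demo_buggy(struct demo_dev *d)
    {
            unsigned long flags;

            spin_lock_irqsave(&d->a_lock, flags);
            spin_lock_irqsave(&d->b_lock, flags);  /* reuses 'flags': bug */
            /* ... */
            spin_unlock_irqrestore(&d->b_lock, flags);
            spin_unlock_irqrestore(&d->a_lock, flags);
    }

    /*
     * Fixed pattern: each irqsave level gets its own flags variable (or the
     * inner lock simply uses spin_lock(), since interrupts are already
     * disabled by the outer spin_lock_irqsave()).
     */
    static void demo_fixed(struct demo_dev *d)
    {
            unsigned long outer_flags, inner_flags;

            spin_lock_irqsave(&d->a_lock, outer_flags);
            spin_lock_irqsave(&d->b_lock, inner_flags);
            /* ... */
            spin_unlock_irqrestore(&d->b_lock, inner_flags);
            spin_unlock_irqrestore(&d->a_lock, outer_flags);
    }
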
/linux-6.12.1/arch/powerpc/kvm/

D | book3s_hv_p9_entry.c |
     137  if (!vcpu->arch.nested) {   in store_vcpu_state()
     201  if (!vcpu->arch.nested) {   in restore_p9_host_os_sprs()
     303  struct kvm_nested_guest *nested = vcpu->arch.nested;   in switch_mmu_to_guest_radix() local
     307  lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;   in switch_mmu_to_guest_radix()
     454  struct kvm_nested_guest *nested)   in check_need_tlb_flush() argument
     460  if (nested)   in check_need_tlb_flush()
     461  need_tlb_flush = &nested->need_tlb_flush;   in check_need_tlb_flush()
     536  struct kvm_nested_guest *nested = vcpu->arch.nested;   in kvmhv_vcpu_entry_p9() local
     703  check_need_tlb_flush(kvm, vc->pcpu, nested);   in kvmhv_vcpu_entry_p9()

D | book3s_64_mmu_radix.c |
     110  if (vcpu->arch.nested)   in kvmhv_copy_tofrom_guest_radix()
     111  lpid = vcpu->arch.nested->shadow_lpid;   in kvmhv_copy_tofrom_guest_radix()
     793  bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,   in kvmppc_hv_handle_set_rc() argument
     809  if (nested)   in kvmppc_hv_handle_set_rc()
    1310  struct kvm_nested_guest *nested;   in debugfs_radix_read() local
    1346  nested = NULL;   in debugfs_radix_read()
    1352  if (nested) {   in debugfs_radix_read()
    1353  kvmhv_put_nested(nested);   in debugfs_radix_read()
    1354  nested = NULL;   in debugfs_radix_read()
    1365  nested = kvmhv_get_nested(kvm, p->lpid, false);   in debugfs_radix_read()
     [all …]

/linux-6.12.1/arch/um/os-Linux/

D | signal.c |
     196  int nested, bail;   in hard_handler() local
     212  nested = pending & 1;   in hard_handler()
     227  if (!nested)   in hard_handler()
     228  pending = from_irq_stack(nested);   in hard_handler()

/linux-6.12.1/Documentation/dev-tools/

D | ktap.rst |
      16  KTAP test results describe a series of tests (which may be nested: i.e., test
      29  information, in particular nested test results, may be lost. Also note that
      46  start of the nested test results. This differs from TAP14, which uses a
      59  Plan lines follow version lines to indicate the number of nested tests.
     185  In KTAP, tests can be nested. This is done by having a test include within its
     201  An example of a test with two nested subtests:
     214  An example format with multiple levels of nested testing:
     240  allows an arbitrary number of tests to be nested   no   yes
     245  The TAP14 specification does permit nested tests, but instead of using another
     246  nested version line, uses a line of the form

/linux-6.12.1/Documentation/arch/x86/

D | kernel-stacks.rst |
      28  hardware interrupt (i.e. not a nested hardware interrupt) then the
      54  nested IST interrupts then the handler must adjust the IST values on
      59  nested. For example, a debug interrupt can safely be interrupted by an
      62  IST events with the same code to be nested. However in most cases, the
      95  To handle nested #DB correctly there exist two instances of DB stacks. On
      97  so a nested #DB starts from a clean stack. The nested #DB switches
