Lines matching "no-poll-on-init" in arch/x86/kernel/kvm.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 #define pr_fmt(fmt) "kvm-guest: " fmt
13 #include <linux/init.h>
58 early_param("no-kvmapf", parse_no_kvmapf);
67 early_param("no-steal-acc", parse_no_stealacc);
76 * No need for any "IO delay" on KVM
102 hlist_for_each(p, &b->list) { in _find_apf_task()
105 if (n->token == token) in _find_apf_task()
118 raw_spin_lock(&b->lock); in kvm_async_pf_queue_task()
121 /* dummy entry exists -> wakeup was delivered ahead of PF */ in kvm_async_pf_queue_task()
122 hlist_del(&e->link); in kvm_async_pf_queue_task()
123 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
128 n->token = token; in kvm_async_pf_queue_task()
129 n->cpu = smp_processor_id(); in kvm_async_pf_queue_task()
130 init_swait_queue_head(&n->wq); in kvm_async_pf_queue_task()
131 hlist_add_head(&n->link, &b->list); in kvm_async_pf_queue_task()
132 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
137 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
168 hlist_del_init(&n->link); in apf_task_wake_one()
169 if (swq_has_sleeper(&n->wq)) in apf_task_wake_one()
170 swake_up_one(&n->wq); in apf_task_wake_one()
182 raw_spin_lock(&b->lock); in apf_task_wake_all()
183 hlist_for_each_safe(p, next, &b->list) { in apf_task_wake_all()
185 if (n->cpu == smp_processor_id()) in apf_task_wake_all()
188 raw_spin_unlock(&b->lock); in apf_task_wake_all()
204 raw_spin_lock(&b->lock); in kvm_async_pf_task_wake()
210 * as the allocator is preemptible on PREEMPT_RT kernels. in kvm_async_pf_task_wake()
213 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
217 * Continue looping on allocation failure, eventually in kvm_async_pf_task_wake()
230 dummy->token = token; in kvm_async_pf_task_wake()
231 dummy->cpu = smp_processor_id(); in kvm_async_pf_task_wake()
232 init_swait_queue_head(&dummy->wq); in kvm_async_pf_task_wake()
233 hlist_add_head(&dummy->link, &b->list); in kvm_async_pf_task_wake()
238 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
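
The queue and wake fragments above hinge on the "dummy entry" handshake: if the "page ready" notification arrives before the faulting task has queued itself, the wake path leaves a dummy entry behind so that the later queue path sees it and knows not to sleep. Below is a minimal userspace sketch of that handshake only, not the kernel code: the hashed buckets, raw spinlocks and swait queue are omitted, and every name (apf_entry, queue_token, wake_token) is made up for illustration.

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct apf_entry {
		struct apf_entry *next;
		unsigned int token;
		bool is_dummy;            /* wakeup arrived before the fault queued */
	};

	static struct apf_entry *pending;  /* one bucket; the kernel hashes by token */

	static struct apf_entry *find_token(unsigned int token)
	{
		for (struct apf_entry *e = pending; e; e = e->next)
			if (e->token == token)
				return e;
		return NULL;
	}

	static void unlink_entry(struct apf_entry *victim)
	{
		for (struct apf_entry **pp = &pending; *pp; pp = &(*pp)->next)
			if (*pp == victim) {
				*pp = victim->next;
				free(victim);
				return;
			}
	}

	static void push_entry(unsigned int token, bool dummy)
	{
		struct apf_entry *e = calloc(1, sizeof(*e));

		if (!e)
			exit(1);
		e->token = token;
		e->is_dummy = dummy;
		e->next = pending;
		pending = e;
	}

	/* Fault side: returns true if the task must sleep until the page is ready. */
	static bool queue_token(unsigned int token)
	{
		struct apf_entry *e = find_token(token);

		if (e && e->is_dummy) {
			/* dummy entry exists -> wakeup was delivered ahead of the fault */
			unlink_entry(e);
			return false;
		}
		push_entry(token, false);
		return true;              /* caller would now wait on a wait queue */
	}

	/* Wake side: wake the sleeper, or leave a dummy entry if nobody queued yet. */
	static void wake_token(unsigned int token)
	{
		struct apf_entry *e = find_token(token);

		if (e)
			unlink_entry(e);      /* real entry: the kernel wakes its task */
		else
			push_entry(token, true);
	}

	int main(void)
	{
		printf("fault first : sleep = %d\n", queue_token(1));  /* 1: must wait */
		wake_token(1);
		wake_token(2);                                         /* wakeup races ahead */
		printf("wakeup first: sleep = %d\n", queue_token(2));  /* 0: skip the wait */
		return 0;
	}
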
274 if (unlikely(!(regs->flags & X86_EFLAGS_IF))) in __kvm_handle_async_pf()
340 * This relies on __test_and_clear_bit to modify the memory in kvm_guest_apic_eoi_write()
343 * there's no need for lock or memory barriers. in kvm_guest_apic_eoi_write()
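
The EOI fragment above describes the PV EOI fast path: the guest tests and clears an "EOI pending" flag in memory shared with the hypervisor, and only falls back to the real APIC register write if the flag was clear. A compilable userspace sketch of that decision follows; pv_eoi_word and slow_apic_eoi() are stand-ins, and C11 atomics are used here for portability even though the kernel deliberately relies on the cheaper, local-CPU-only __test_and_clear_bit().

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static _Atomic unsigned long pv_eoi_word;  /* stand-in for memory shared with the host */

	/* Roughly __test_and_clear_bit(0, &pv_eoi_word): clear the flag, report old value. */
	static bool test_and_clear_pending(void)
	{
		return atomic_fetch_and(&pv_eoi_word, ~1UL) & 1UL;
	}

	static void slow_apic_eoi(void)
	{
		puts("slow path: write the APIC EOI register");
	}

	static void pv_eoi_write(void)
	{
		if (test_and_clear_pending())
			return;               /* host already acknowledged: skip the register write */
		slow_apic_eoi();
	}

	int main(void)
	{
		pv_eoi_word = 1;          /* pretend the host armed the PV EOI flag */
		pv_eoi_write();           /* fast path, no APIC access */
		pv_eoi_write();           /* flag clear -> slow path */
		return 0;
	}
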
413 version = src->version; in kvm_steal_clock()
415 steal = src->steal; in kvm_steal_clock()
417 } while ((version & 1) || (version != src->version)); in kvm_steal_clock()
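
The retry loop above is the usual even/odd version protocol for lock-free reads of a host-updated record: the writer makes the version odd before touching the payload and even again afterwards, so the reader retries whenever it saw an odd version or the version changed under it. A small compilable sketch of the reader side, with illustrative field names; the read barriers the kernel inserts between the loads are only noted in comments.

	#include <stdint.h>

	struct steal_time_rec {
		volatile uint32_t version;
		volatile uint64_t steal;
	};

	uint64_t read_steal(const struct steal_time_rec *src)
	{
		uint32_t version;
		uint64_t steal;

		do {
			version = src->version;  /* snapshot; odd means an update is in flight */
			/* the kernel issues a read barrier here ... */
			steal = src->steal;
			/* ... and another one before re-checking the version */
		} while ((version & 1) || (version != src->version));

		return steal;
	}
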
432 * hotplugged will have their per-cpu variable already mapped as
533 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { in __send_ipi_mask()
534 ipi_bitmap <<= min - apic_id; in __send_ipi_mask()
541 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
546 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); in __send_ipi_mask()
552 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
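
The __send_ipi_mask() fragments above pack destination APIC IDs into a bitmap relative to a sliding minimum, so that up to 128 destinations fit in one hypercall: an ID below the current base shifts the existing bits up, and an ID that no longer fits causes the pending hypercall to be issued and a new window to be started. A userspace sketch of just that encoding; printf stands in for the hypercall, CLUSTER_SIZE mirrors KVM_IPI_CLUSTER_SIZE, and __uint128_t is a GCC/Clang extension.

	#include <stdint.h>
	#include <stdio.h>

	#define CLUSTER_SIZE 128      /* bits per "hypercall", as in KVM_IPI_CLUSTER_SIZE */

	static void flush(uint32_t min, __uint128_t bitmap)
	{
		printf("hypercall: base apic id %u, bitmap %016llx%016llx\n", min,
		       (unsigned long long)(bitmap >> 64), (unsigned long long)bitmap);
	}

	static void send_ipi_ids(const uint32_t *ids, int n)
	{
		__uint128_t bitmap = 0;
		uint32_t min = 0, max = 0;

		for (int i = 0; i < n; i++) {
			uint32_t id = ids[i];

			if (!bitmap) {
				min = max = id;                    /* first destination opens a window */
			} else if (id < min && max - id < CLUSTER_SIZE) {
				bitmap <<= min - id;               /* window slides down: shift bits up */
				min = id;
			} else if (id - min >= CLUSTER_SIZE) { /* also catches "too far below min" */
				flush(min, bitmap);                /* window full: issue and restart */
				bitmap = 0;
				min = max = id;
			}
			if (id > max)
				max = id;
			bitmap |= (__uint128_t)1 << (id - min);
		}
		if (bitmap)
			flush(min, bitmap);
	}

	int main(void)
	{
		uint32_t ids[] = { 3, 1, 130, 5 };
		send_ipi_ids(ids, 4);
		return 0;
	}
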
660 * We have to flush only online vCPUs, and in kvm_flush_tlb_multi()
661 * queue flush_on_enter for preempted vCPUs. in kvm_flush_tlb_multi()
666 * skip check for local vCPU - it will never be cleared from in kvm_flush_tlb_multi()
670 state = READ_ONCE(src->preempted); in kvm_flush_tlb_multi()
672 if (try_cmpxchg(&src->preempted, &state, in kvm_flush_tlb_multi()
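
The preempted-vCPU check above avoids sending a flush IPI to a vCPU that is not currently running: if its steal-time record says it is preempted, the guest atomically tags it so the hypervisor flushes the TLB when that vCPU next enters, and drops it from the IPI mask; if the compare-and-exchange loses a race (the vCPU got rescheduled meanwhile), the IPI is sent as usual. A sketch of that decision using C11 atomics in place of the kernel's try_cmpxchg(); the flag values are illustrative.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define VCPU_PREEMPTED 0x1    /* illustrative; mirrors KVM_VCPU_PREEMPTED */
	#define VCPU_FLUSH_TLB 0x2    /* illustrative; mirrors KVM_VCPU_FLUSH_TLB */

	/* Returns true if the flush IPI to this vCPU can be skipped. */
	bool defer_flush_if_preempted(_Atomic uint8_t *preempted)
	{
		uint8_t state = atomic_load_explicit(preempted, memory_order_relaxed);

		if (!(state & VCPU_PREEMPTED))
			return false;         /* vCPU is running: it must receive the IPI */

		/*
		 * Succeed only if the state did not change under us; if the
		 * hypervisor rescheduled the vCPU meanwhile, fall back to the IPI.
		 */
		return atomic_compare_exchange_strong(preempted, &state,
						      state | VCPU_FLUSH_TLB);
	}
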
701 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() in kvm_smp_prepare_boot_cpu()
790 return !!(src->preempted & KVM_VCPU_PREEMPTED); in __kvm_vcpu_is_preempted()
796 #include <asm/asm-offsets.h>
801 * Hand-optimized version for x86-64 to avoid 8 64-bit register saving and
875 return 0; /* So we don't blow up on old processors */ in __kvm_cpuid_base()
885 static int kvm_cpuid_base = -1; in kvm_cpuid_base()
887 if (kvm_cpuid_base == -1) in kvm_cpuid_base()
955 for (i = 0; i < e820_table->nr_entries; i++) { in kvm_init_platform()
956 struct e820_entry *entry = &e820_table->entries[i]; in kvm_init_platform()
958 if (entry->type != E820_TYPE_RAM) in kvm_init_platform()
961 nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); in kvm_init_platform()
963 kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, in kvm_init_platform()
973 __end_bss_decrypted - __start_bss_decrypted, 0); in kvm_init_platform()
985 /* Set WB as the default cache mode for SEV-SNP and TDX */ in kvm_init_platform()
993 ghcb_set_rbx(ghcb, regs->bx); in kvm_sev_es_hcall_prepare()
994 ghcb_set_rcx(ghcb, regs->cx); in kvm_sev_es_hcall_prepare()
995 ghcb_set_rdx(ghcb, regs->dx); in kvm_sev_es_hcall_prepare()
996 ghcb_set_rsi(ghcb, regs->si); in kvm_sev_es_hcall_prepare()
1001 /* No checking of the return state needed */ in kvm_sev_es_hcall_finish()
1010 .init.guest_late_init = kvm_guest_init,
1011 .init.x2apic_available = kvm_para_available,
1012 .init.msi_ext_dest_id = kvm_msi_ext_dest_id,
1013 .init.init_platform = kvm_init_platform,
1054 * in irq spinlock slowpath and no spurious interrupt occurs to save us. in kvm_wait()
1081 pr_info("PV spinlocks disabled, no host support\n"); in kvm_spinlock_init()
1139 pr_err_once("host does not support poll control\n"); in arch_haltpoll_enable()
1144 /* Enabling guest halt poll disables host halt poll */ in arch_haltpoll_enable()
1154 /* Disabling guest halt poll re-enables host halt poll */ in arch_haltpoll_disable()
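
The two halt-poll comments describe a handshake with the host: when guest-side halt polling is switched on, the guest writes the poll-control MSR so the host stops polling on its behalf, and re-enables host polling when guest polling is switched off, so the same wakeup is never polled for twice. As a purely illustrative userspace sketch (the real toggle is a wrmsr in the guest kernel), the same register can be poked through the msr driver on a KVM guest: 0x4b564d05 is MSR_KVM_POLL_CONTROL, writing 0 disables host polling and 1 re-enables it. Requires root and the msr module.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_KVM_POLL_CONTROL 0x4b564d05u

	static int write_poll_control(int cpu, uint64_t enable_host_poll)
	{
		char path[64];
		ssize_t ret;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;

		/* The msr driver interprets the file offset as the MSR index. */
		ret = pwrite(fd, &enable_host_poll, sizeof(enable_host_poll),
			     MSR_KVM_POLL_CONTROL);
		close(fd);
		return ret == sizeof(enable_host_poll) ? 0 : -1;
	}

	int main(void)
	{
		/* Guest starts polling itself -> ask the host to stop polling. */
		if (write_poll_control(0, 0))
			perror("poll control");
		return 0;
	}
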