Lines Matching +full:halt +full:- +full:regs (arch/x86/kernel/kvm.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 #define pr_fmt(fmt) "kvm-guest: " fmt
58 early_param("no-kvmapf", parse_no_kvmapf);
67 early_param("no-steal-acc", parse_no_stealacc);
102 hlist_for_each(p, &b->list) { in _find_apf_task()
105 if (n->token == token) in _find_apf_task()
118 raw_spin_lock(&b->lock); in kvm_async_pf_queue_task()
121 /* dummy entry exists -> wake-up was delivered ahead of the #PF */ in kvm_async_pf_queue_task()
122 hlist_del(&e->link); in kvm_async_pf_queue_task()
123 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
128 n->token = token; in kvm_async_pf_queue_task()
129 n->cpu = smp_processor_id(); in kvm_async_pf_queue_task()
130 init_swait_queue_head(&n->wq); in kvm_async_pf_queue_task()
131 hlist_add_head(&n->link, &b->list); in kvm_async_pf_queue_task()
132 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
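
The matches above (lines 102-132) come from the async-PF sleeper machinery: faulting tasks are parked in a small hash table keyed by the host-supplied token. Below is a minimal sketch of that pattern reconstructed around the matched lines; the struct layouts, KVM_TASK_SLEEP_HASHBITS, and the async_pf_sleepers table are assumptions based on this file's conventions and may differ across kernel versions.

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/swait.h>

#define KVM_TASK_SLEEP_HASHBITS 8       /* assumed: 256-bucket table */
#define KVM_TASK_SLEEP_HASHSIZE (1 << KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
};

struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
};

static struct kvm_task_sleep_head async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

/* Walk one hash bucket looking for the node that owns @token. */
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }
        return NULL;
}

/* Returns false if the wake-up already arrived (a dummy entry was found),
 * true if @n was queued and the caller should sleep on n->wq. */
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *e;

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake-up beat the #PF; consume it */
                hlist_del(&e->link);
                raw_spin_unlock(&b->lock);
                kfree(e);
                return false;
        }

        n->token = token;
        n->cpu = smp_processor_id();
        init_swait_queue_head(&n->wq);
        hlist_add_head(&n->link, &b->list);
        raw_spin_unlock(&b->lock);
        return true;
}

The raw spinlock matters here: this runs from exception context, so it must never sleep, which is also why the wake side allocates its dummy node outside the lock.
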
137 * kvm_async_pf_task_wait_schedule - Wait for a page fault to be handled
168 hlist_del_init(&n->link); in apf_task_wake_one()
169 if (swq_has_sleeper(&n->wq)) in apf_task_wake_one()
170 swake_up_one(&n->wq); in apf_task_wake_one()
182 raw_spin_lock(&b->lock); in apf_task_wake_all()
183 hlist_for_each_safe(p, next, &b->list) { in apf_task_wake_all()
185 if (n->cpu == smp_processor_id()) in apf_task_wake_all()
188 raw_spin_unlock(&b->lock); in apf_task_wake_all()
204 raw_spin_lock(&b->lock); in kvm_async_pf_task_wake()
213 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
230 dummy->token = token; in kvm_async_pf_task_wake()
231 dummy->cpu = smp_processor_id(); in kvm_async_pf_task_wake()
232 init_swait_queue_head(&dummy->wq); in kvm_async_pf_task_wake()
233 hlist_add_head(&dummy->link, &b->list); in kvm_async_pf_task_wake()
238 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
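
The wake-up side (lines 168-238) is the mirror image: find the sleeper for the token and kick it, or leave a dummy node if the PAGE_READY notification raced ahead of the fault. A sketch reusing the types above; the token == ~0 "wake all" path visible at lines 182-188 is omitted for brevity.

/* Unhash one sleeper and kick it if a task is parked on the queue. */
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (swq_has_sleeper(&n->wq))
                swake_up_one(&n->wq);
}

void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n, *dummy = NULL;

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * The wake-up arrived before the fault was queued: leave a
                 * dummy entry so the later fault sees it. Allocate outside
                 * the raw lock; the allocator can sleep on PREEMPT_RT.
                 */
                if (!dummy) {
                        raw_spin_unlock(&b->lock);
                        dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
                        if (!dummy)
                                cpu_relax();    /* loop until it succeeds */
                        goto again;
                }
                dummy->token = token;
                dummy->cpu = smp_processor_id();
                init_swait_queue_head(&dummy->wq);
                hlist_add_head(&dummy->link, &b->list);
                dummy = NULL;   /* ownership passed to the bucket */
        } else {
                apf_task_wake_one(n);
        }
        raw_spin_unlock(&b->lock);

        kfree(dummy);   /* allocated but unused on the retry path */
}
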
258 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) in __kvm_handle_async_pf() argument
266 state = irqentry_enter(regs); in __kvm_handle_async_pf()
274 if (unlikely(!(regs->flags & X86_EFLAGS_IF))) in __kvm_handle_async_pf()
278 if (unlikely(!user_mode(regs))) in __kvm_handle_async_pf()
287 irqentry_exit(regs, state); in __kvm_handle_async_pf()
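
Lines 258-287 are the #PF-vector entry point for async page faults. A condensed sketch of the control flow around the matched lines; kvm_read_and_reset_apf_flags() and KVM_PV_REASON_PAGE_NOT_PRESENT come from this file, but the error handling is abbreviated.

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
        u32 flags = kvm_read_and_reset_apf_flags();
        irqentry_state_t state;

        if (!flags)
                return false;   /* not an async #PF; take the normal path */

        state = irqentry_enter(regs);
        instrumentation_begin();

        /* An async #PF injected with IRQs off breaks the PV contract. */
        if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
                panic("Host injected async #PF in interrupt disabled region\n");

        if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
                if (unlikely(!user_mode(regs)))
                        panic("Host injected async #PF in kernel mode\n");
                /* Page swapped out by the host; sleep until PAGE_READY. */
                kvm_async_pf_task_wait_schedule(token);
        }

        instrumentation_end();
        irqentry_exit(regs, state);
        return true;
}
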
293 struct pt_regs *old_regs = set_irq_regs(regs); in DEFINE_IDTENTRY_SYSVEC()
413 version = src->version; in kvm_steal_clock()
415 steal = src->steal; in kvm_steal_clock()
417 } while ((version & 1) || (version != src->version)); in kvm_steal_clock()
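
The loop at lines 413-417 is a seqcount-style read of the steal-time page the host updates. Reconstructed in full below; in the source the per-CPU steal_time variable is declared DEFINE_PER_CPU_DECRYPTED earlier in the file.

static DEFINE_PER_CPU(struct kvm_steal_time, steal_time);

static u64 kvm_steal_clock(int cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
        u64 steal;
        int version;

        /*
         * Seqcount-style read: the host bumps ->version to an odd value
         * before updating ->steal and to an even value afterwards, so an
         * odd or changed version means we raced and must retry.
         */
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}
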
432 * hotplugged will have their per-cpu variable already mapped as decrypted.
533 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { in __send_ipi_mask()
534 ipi_bitmap <<= min - apic_id; in __send_ipi_mask()
541 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
546 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); in __send_ipi_mask()
552 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
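
Lines 533-552 build the PV IPI bitmap: one KVM_HC_SEND_IPI hypercall covers a sliding window of KVM_IPI_CLUSTER_SIZE APIC IDs, passed as two 64-bit halves. A sketch of that window logic for x86-64; the ICR encoding of @vector and the 32-bit fallback in the real function are simplified away.

static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
        int cpu, apic_id, min = 0, max = 0;
        __uint128_t ipi_bitmap = 0;     /* 128-bit window of APIC IDs */
        long ret;

        for_each_cpu(cpu, mask) {
                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
                if (!ipi_bitmap) {
                        min = max = apic_id;    /* open a new window */
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;   /* slide window down */
                        min = apic_id;
                } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;    /* grow upward */
                } else {
                        /* Outside the window: flush it, then start over. */
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI,
                                             (unsigned long)ipi_bitmap,
                                             (unsigned long)(ipi_bitmap >> 64),
                                             min, vector);
                        WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
                                  ret);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
                __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
        }

        if (ipi_bitmap) {       /* flush the final window */
                ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                     (unsigned long)(ipi_bitmap >> 64),
                                     min, vector);
                WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", ret);
        }
}
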
661 * queue flush_on_enter for pre-empted vCPUs in kvm_flush_tlb_multi()
666 * skip check for local vCPU - it will never be cleared from flushmask. in kvm_flush_tlb_multi()
670 state = READ_ONCE(src->preempted); in kvm_flush_tlb_multi()
672 if (try_cmpxchg(&src->preempted, &state, in kvm_flush_tlb_multi()
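
Putting lines 661-672 together: the flush path marks preempted vCPUs with KVM_VCPU_FLUSH_TLB instead of sending them an IPI, since the host flushes their TLB on re-entry anyway. Reconstructed sketch; __pv_cpu_mask and native_flush_tlb_multi() are taken from the surrounding file.

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
                                const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

        cpumask_copy(flushmask, cpumask);
        for_each_cpu(cpu, flushmask) {
                /* The local vCPU is never preempted, so it is never
                 * cleared from flushmask. */
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if (state & KVM_VCPU_PREEMPTED) {
                        /* Atomically set FLUSH_TLB; on success the host
                         * will flush for us and the IPI can be skipped. */
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_multi(flushmask, info);
}
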
701 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() shares the guest physical address with the hypervisor. in kvm_smp_prepare_boot_cpu()
776 static void kvm_crash_shutdown(struct pt_regs *regs) in kvm_crash_shutdown() argument
779 native_machine_crash_shutdown(regs); in kvm_crash_shutdown()
790 return !!(src->preempted & KVM_VCPU_PREEMPTED); in __kvm_vcpu_is_preempted()
796 #include <asm/asm-offsets.h>
801 * Hand-optimized version for x86-64 that avoids saving and restoring 8 64-bit registers to/from the stack.
885 static int kvm_cpuid_base = -1; in kvm_cpuid_base()
887 if (kvm_cpuid_base == -1) in kvm_cpuid_base()
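
Lines 885-887 memoize the KVM CPUID base leaf in a function-local static. A sketch of the whole helper, assuming the KVM_SIGNATURE probe via hypervisor_cpuid_base() that this file uses.

#define KVM_SIGNATURE "KVMKVMKVM\0\0\0"  /* normally from asm/kvm_para.h */

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* ancient CPU without CPUID */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base(KVM_SIGNATURE, 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();    /* probe once */

        return kvm_cpuid_base;
}
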
955 for (i = 0; i < e820_table->nr_entries; i++) { in kvm_init_platform()
956 struct e820_entry *entry = &e820_table->entries[i]; in kvm_init_platform()
958 if (entry->type != E820_TYPE_RAM) in kvm_init_platform()
961 nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); in kvm_init_platform()
963 kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, in kvm_init_platform()
973 __end_bss_decrypted - __start_bss_decrypted, 0); in kvm_init_platform()
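
Lines 955-973 tell the host the encryption status of guest memory via KVM_HC_MAP_GPA_RANGE. A sketch under the assumption that this runs from kvm_init_platform() on SEV guests; the helper name kvm_sev_init_platform() below is hypothetical, and early_set_mem_enc_dec_hypercall()'s size-in-bytes signature matches line 973 but has varied across kernel versions.

/* Hypothetical helper; in the source this logic sits inside
 * kvm_init_platform() under a memory-encryption check. */
static void __init kvm_sev_init_platform(void)
{
        u64 nr_pages;
        int i;

        /* Report every E820 RAM range as mapped encrypted. */
        for (i = 0; i < e820_table->nr_entries; i++) {
                struct e820_entry *entry = &e820_table->entries[i];

                if (entry->type != E820_TYPE_RAM)
                        continue;

                nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

                kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
                                   nr_pages,
                                   KVM_MAP_GPA_RANGE_ENCRYPTED |
                                   KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
        }

        /* The .bss..decrypted section is shared with the host, so flip
         * it back to decrypted (enc = 0); size is given in bytes. */
        early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
                                        __end_bss_decrypted - __start_bss_decrypted,
                                        0);
}
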
985 /* Set WB as the default cache mode for SEV-SNP and TDX */ in kvm_init_platform()
990 static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) in kvm_sev_es_hcall_prepare() argument
993 ghcb_set_rbx(ghcb, regs->bx); in kvm_sev_es_hcall_prepare()
994 ghcb_set_rcx(ghcb, regs->cx); in kvm_sev_es_hcall_prepare()
995 ghcb_set_rdx(ghcb, regs->dx); in kvm_sev_es_hcall_prepare()
996 ghcb_set_rsi(ghcb, regs->si); in kvm_sev_es_hcall_prepare()
999 static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) in kvm_sev_es_hcall_finish() argument
1052 * halt until it's our turn and we get kicked. Note that we do a safe halt in the IRQ-enabled case, to avoid a hang when the lock info is overwritten in the IRQ spinlock slowpath and no spurious interrupt arrives to save us. in kvm_wait()
1058 halt(); in kvm_wait()
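
The halt() at line 1058 sits inside the pv-spinlock wait primitive. A sketch of the full kvm_wait(), reconstructed around the comment at line 1052.

static void kvm_wait(u8 *ptr, u8 val)
{
        if (in_nmi())
                return;         /* never halt in NMI context */

        if (irqs_disabled()) {
                /* Halt only if the lock byte still holds the value we
                 * are waiting on; the kick arrives as an interrupt. */
                if (READ_ONCE(*ptr) == val)
                        halt();
        } else {
                local_irq_disable();
                /* Re-check with IRQs off, then use safe_halt() (STI;HLT)
                 * so a kick between the check and the halt is not lost. */
                if (READ_ONCE(*ptr) == val)
                        safe_halt();
                else
                        local_irq_enable();
        }
}
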
1144 /* Enabling guest halt polling disables host halt polling */ in arch_haltpoll_enable()
1154 /* Disabling guest halt polling re-enables host halt polling */ in arch_haltpoll_disable()
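
The two comments above pair up: guest and host halt polling are mutually exclusive, toggled via MSR_KVM_POLL_CONTROL on the target CPU. A sketch of both toggles; the feature-check error messages in the real functions are omitted.

/* MSR_KVM_POLL_CONTROL bit 0: 1 = host may poll on HLT for this vCPU,
 * 0 = host schedules out immediately (the guest polls instead). */
static void kvm_disable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
                return;

        /* Enabling guest halt polling disables host halt polling */
        smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}

void arch_haltpoll_disable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
                return;

        /* Disabling guest halt polling re-enables host halt polling */
        smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
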