Lines Matching "end-of-conversion"

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2022 Intel Corporation */
16 #include <asm/insn-eval.h>
82 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
84 * @reportdata: Address of the input buffer which contains user-defined
86 * @tdreport: Address of the output buffer to store TDREPORT.
92 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
107 return -EINVAL; in tdx_mcall_get_report0()
108 return -EIO; in tdx_mcall_get_report0()
116 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
118 * @buf: Address of the directly mapped shared kernel buffer which
121 * @size: size of the tdquote buffer (4KB-aligned).
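Unlike the TDREPORT path, GetQuote hands a buffer to the VMM, so the pages must be converted to shared memory before the hypercall is issued. A hedged sketch of the buffer preparation, assuming set_memory_decrypted() as the conversion primitive and a page-granular, 4KB-aligned allocation; example_alloc_quote_buf() is a hypothetical helper, and completion polling plus cleanup are intentionally omitted:

/* Sketch only: the GetQuote buffer must live in VMM-shared memory and be
 * 4KB-aligned.  set_memory_decrypted() is assumed here as the conversion
 * primitive. */
static u8 *example_alloc_quote_buf(size_t size)
{
	u8 *buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(size));

	if (!buf)
		return NULL;

	/* Convert the pages to shared before the VMM is asked to fill them */
	if (set_memory_decrypted((unsigned long)buf, PFN_UP(size))) {
		/* Conversion may have partially failed; do not reuse the pages */
		return NULL;
	}

	return buf;
}

A caller would then pass the buffer and size to tdx_hcall_get_quote() and wait for the VMM to mark the quote complete in the shared buffer before reading it back.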
179 * information like GPA width, number of available vcpus, debug mode in tdx_parse_tdinfo()
181 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL in tdx_parse_tdinfo()
187 * The highest bit of a guest physical address is the "sharing" bit. in tdx_parse_tdinfo()
190 * The GPA width that comes out of this call is critical. TDX guests in tdx_parse_tdinfo()
194 *cc_mask = BIT_ULL(gpa_width - 1); in tdx_parse_tdinfo()
199 * TD-private memory. Only VMM-shared memory (MMIO) will #VE. in tdx_parse_tdinfo()
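The "sharing" bit is simply the top bit of the guest physical address space, so a TD reporting a GPA width of 48 uses bit 47 to select shared vs. private. A standalone sketch of the same derivation as tdx_parse_tdinfo(); the widths 48 and 52 below are example values, not something the kernel assumes:

#include <stdint.h>
#include <stdio.h>

/* The shared bit is the highest GPA bit, i.e. bit (gpa_width - 1). */
static uint64_t shared_bit(unsigned int gpa_width)
{
	return 1ULL << (gpa_width - 1);
}

int main(void)
{
	printf("gpa_width 48 -> cc_mask %#llx\n",
	       (unsigned long long)shared_bit(48));   /* 0x800000000000 */
	printf("gpa_width 52 -> cc_mask %#llx\n",
	       (unsigned long long)shared_bit(52));   /* 0x8000000000000 */
	return 0;
}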
214 * The TDX module spec states that #VE may be injected for a limited set of
217 * - Emulation of the architectural #VE injection on EPT violation;
219 * - As a result of guest TD execution of a disallowed instruction,
222 * - A notification to the guest TD about anomalous behavior;
224 * The last one is opt-in and is not used by the kernel.
236 switch (ve->exit_reason) { in ve_instr_len()
242 /* It is safe to use ve->instr_len for #VE caused by these instructions */ in ve_instr_len()
243 return ve->instr_len; in ve_instr_len()
246 * For EPT violations, ve->instr_len is not defined. For those, in ve_instr_len()
250 WARN_ONCE(1, "ve->instr_len is not defined for EPT violations"); in ve_instr_len()
253 WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason); in ve_instr_len()
254 return ve->instr_len; in ve_instr_len()
268 * can be found in TDX Guest-Host-Communication Interface in __halt()
272 * enabled status (RFLAGS.IF) of the TD guest and to determine in __halt()
286 return -EIO; in handle_halt()
307 .r12 = regs->cx, in read_msr()
312 * can be found in TDX Guest-Host-Communication Interface in read_msr()
316 return -EIO; in read_msr()
318 regs->ax = lower_32_bits(args.r11); in read_msr()
319 regs->dx = upper_32_bits(args.r11); in read_msr()
328 .r12 = regs->cx, in write_msr()
329 .r13 = (u64)regs->dx << 32 | regs->ax, in write_msr()
334 * can be found in TDX Guest-Host-Communication Interface in write_msr()
338 return -EIO; in write_msr()
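The MSR paths mirror the hardware RDMSR/WRMSR register convention: a 64-bit MSR value travels as EDX:EAX, so the read path splits the hypercall result and the write path reassembles it, exactly as the `.r13 = (u64)regs->dx << 32 | regs->ax` line above shows. A standalone sketch of that packing, independent of the TDCALL plumbing:

#include <stdint.h>
#include <assert.h>

/* WRMSR direction: rebuild the 64-bit value from EDX:EAX */
static uint64_t msr_value_from_regs(uint32_t eax, uint32_t edx)
{
	return (uint64_t)edx << 32 | eax;
}

/* RDMSR direction: split the 64-bit hypercall output back into EDX:EAX */
static void msr_value_to_regs(uint64_t val, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)val;          /* lower_32_bits() */
	*edx = (uint32_t)(val >> 32);  /* upper_32_bits() */
}

int main(void)
{
	uint32_t eax, edx;

	msr_value_to_regs(0x123456789abcdef0ULL, &eax, &edx);
	assert(eax == 0x9abcdef0 && edx == 0x12345678);
	assert(msr_value_from_regs(eax, edx) == 0x123456789abcdef0ULL);
	return 0;
}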
348 .r12 = regs->ax, in handle_cpuid()
349 .r13 = regs->cx, in handle_cpuid()
356 * Return all-zeros for any CPUID outside the range. It matches CPU in handle_cpuid()
357 * behaviour for an unsupported leaf. in handle_cpuid()
359 if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) { in handle_cpuid()
360 regs->ax = regs->bx = regs->cx = regs->dx = 0; in handle_cpuid()
366 * ABI can be found in TDX Guest-Host-Communication Interface in handle_cpuid()
370 return -EIO; in handle_cpuid()
373 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of in handle_cpuid()
377 regs->ax = args.r12; in handle_cpuid()
378 regs->bx = args.r13; in handle_cpuid()
379 regs->cx = args.r14; in handle_cpuid()
380 regs->dx = args.r15; in handle_cpuid()
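Only the hypervisor CPUID range (0x40000000 through 0x4FFFFFFF) is forwarded to the VMM; everything else is answered locally with zeros, matching what hardware does for unsupported leaves. A standalone sketch of just that filtering decision (function and struct names here are illustrative, not kernel identifiers):

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

/* Mirrors the range check in handle_cpuid(): only leaves inside the
 * hypervisor communication range are worth a TDVMCALL to the VMM. */
static bool cpuid_leaf_goes_to_vmm(uint32_t leaf)
{
	return leaf >= 0x40000000 && leaf <= 0x4FFFFFFF;
}

struct cpuid_out { uint32_t eax, ebx, ecx, edx; };

/* Everything else gets the all-zero answer immediately */
static bool emulate_cpuid_locally(uint32_t leaf, struct cpuid_out *out)
{
	if (cpuid_leaf_goes_to_vmm(leaf))
		return false;          /* caller must ask the VMM */

	out->eax = out->ebx = out->ecx = out->edx = 0;
	return true;
}

int main(void)
{
	struct cpuid_out out;

	/* 0x40000005 is inside the hypervisor range: must go to the VMM */
	assert(!cpuid_leaf_goes_to_vmm(0x40000005) == false);

	/* 0x80000008 is outside: answered locally with zeros */
	assert(emulate_cpuid_locally(0x80000008, &out) && out.eax == 0);
	return 0;
}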
417 /* Only in-kernel MMIO is supported */ in handle_mmio()
419 return -EFAULT; in handle_mmio()
421 if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE)) in handle_mmio()
422 return -EFAULT; in handle_mmio()
425 return -EINVAL; in handle_mmio()
429 return -EINVAL; in handle_mmio()
434 return -EINVAL; in handle_mmio()
437 if (!fault_in_kernel_space(ve->gla)) { in handle_mmio()
439 return -EINVAL; in handle_mmio()
452 if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE) in handle_mmio()
453 return -EFAULT; in handle_mmio()
459 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
460 return -EIO; in handle_mmio()
464 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
465 return -EIO; in handle_mmio()
479 return -EINVAL; in handle_mmio()
482 return -EINVAL; in handle_mmio()
486 if (!mmio_read(size, ve->gpa, &val)) in handle_mmio()
487 return -EIO; in handle_mmio()
491 /* Zero-extend for 32-bit operation */ in handle_mmio()
509 return -EINVAL; in handle_mmio()
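Two details of the MMIO path are easy to miss: an access that straddles a page boundary is rejected (the reported GPA only describes the page of the first byte), and 32-bit loads zero-extend into the destination register. A standalone sketch of both checks, with PAGE_SIZE hard-coded to 4096 purely for the example:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

#define EX_PAGE_SIZE 4096UL

/* Same test as handle_mmio(): cross-page accesses cannot be emulated
 * safely from a single faulting GPA. */
static bool mmio_crosses_page(unsigned long vaddr, unsigned int size)
{
	return vaddr / EX_PAGE_SIZE != (vaddr + size - 1) / EX_PAGE_SIZE;
}

/* 32-bit MMIO reads zero-extend, like a real 32-bit register write */
static uint64_t mmio_read_result(uint64_t raw, unsigned int size)
{
	if (size == 4)
		return (uint32_t)raw;
	return raw;
}

int main(void)
{
	assert(!mmio_crosses_page(0x1000, 8));
	assert(mmio_crosses_page(0x1ffd, 8));          /* spans two pages */
	assert(mmio_read_result(0xffffffff11223344ULL, 4) == 0x11223344);
	return 0;
}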
532 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_in()
537 /* Update part of the register affected by the emulated instruction */ in handle_in()
538 regs->ax &= ~mask; in handle_in()
540 regs->ax |= args.r11 & mask; in handle_in()
551 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_out()
555 PORT_WRITE, port, regs->ax & mask); in handle_out()
568 u32 exit_qual = ve->exit_qual; in handle_io()
573 return -EIO; in handle_io()
585 return -EIO; in handle_io()
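The IN path only updates the bytes the access width covers: a width mask is built from the operand size and only those bits of RAX are replaced with the value returned in R11, which is what the `regs->ax &= ~mask` / `regs->ax |= args.r11 & mask` pair above does. A standalone sketch of that masking; the mask construction is an illustration of the idea, not a copy of the kernel's macro:

#include <stdint.h>
#include <assert.h>

/* Width mask for a port access; size is 1, 2 or 4 for port I/O */
static uint64_t io_width_mask(unsigned int size)
{
	return (1ULL << (size * 8)) - 1;
}

/* IN: replace only the bytes the instruction actually wrote */
static uint64_t io_merge_in(uint64_t old_rax, uint64_t in_val, unsigned int size)
{
	uint64_t mask = io_width_mask(size);

	return (old_rax & ~mask) | (in_val & mask);
}

int main(void)
{
	/* A 1-byte IN merges into AL and leaves the rest of RAX alone */
	assert(io_merge_in(0x1122334455667788ULL, 0xabULL, 1) ==
	       0x11223344556677abULL);
	return 0;
}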
591 * Early #VE exception handler. Only handles a subset of port I/O.
608 regs->ip += insn_len; in tdx_early_handle_ve()
634 ve->exit_reason = args.rcx; in tdx_get_ve_info()
635 ve->exit_qual = args.rdx; in tdx_get_ve_info()
636 ve->gla = args.r8; in tdx_get_ve_info()
637 ve->gpa = args.r9; in tdx_get_ve_info()
638 ve->instr_len = lower_32_bits(args.r10); in tdx_get_ve_info()
639 ve->instr_info = upper_32_bits(args.r10); in tdx_get_ve_info()
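tdx_get_ve_info() copies the raw TDG.VP.VEINFO.GET output registers into the #VE descriptor that the handlers consume. A sketch of roughly what that descriptor carries, with field names taken from the assignments above; the real struct ve_info is declared in the x86 TDX header, so this is for orientation only:

/* Approximate shape of the #VE descriptor filled in above. */
struct ve_info_sketch {
	u64 exit_reason;	/* RCX: why the #VE fired (HLT, MSR, CPUID, ...) */
	u64 exit_qual;		/* RDX: exit qualification (e.g. I/O details)    */
	u64 gla;		/* R8:  guest linear address, for EPT violations */
	u64 gpa;		/* R9:  guest physical address                   */
	u32 instr_len;		/* R10[31:0]:  length of the faulting insn       */
	u32 instr_info;		/* R10[63:32]: instruction information           */
};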
645 * On success, returns the number of bytes RIP should be incremented (>=0)
646 * or -errno on error.
650 switch (ve->exit_reason) { in virt_exception_user()
654 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_user()
655 return -EIO; in virt_exception_user()
667 * On success, returns the number of bytes RIP should be incremented (>=0)
668 * or -errno on error.
672 switch (ve->exit_reason) { in virt_exception_kernel()
682 if (is_private_gpa(ve->gpa)) in virt_exception_kernel()
683 panic("Unexpected EPT-violation on private memory."); in virt_exception_kernel()
688 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_kernel()
689 return -EIO; in virt_exception_kernel()
705 regs->ip += insn_len; in tdx_handle_virt_exception()
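Both dispatchers share one contract: a handler returns the number of bytes the faulting instruction occupies (so RIP can be advanced past it) or a negative errno, and tdx_handle_virt_exception() applies the increment. A hedged sketch of that contract; ve_handle_one() is a hypothetical stand-in for virt_exception_user()/virt_exception_kernel():

/* Sketch of the dispatch contract only, not kernel code. */
static bool example_handle_ve(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	insn_len = ve_handle_one(regs, ve);   /* bytes to skip, or -errno */
	if (insn_len < 0)
		return false;                 /* let the caller oops/signal */

	/* Skip the instruction that triggered the #VE and resume */
	regs->ip += insn_len;
	return true;
}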
713 * TDX guest is responsible for flushing TLB on private->shared in tdx_tlb_flush_required()
714 * transition. VMM is responsible for flushing on shared->private. in tdx_tlb_flush_required()
722 * flush to avoid integrity failures in the face of a buggy or in tdx_tlb_flush_required()
740 * Notify the VMM about page mapping conversion. More info about ABI
741 * can be found in TDX Guest-Host-Communication Interface (GHCI),
744 static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc) in tdx_map_gpa() argument
753 end |= cc_mkdec(0); in tdx_map_gpa()
761 .r13 = end - start }; in tdx_map_gpa()
774 if (map_fail_paddr < start || map_fail_paddr >= end) in tdx_map_gpa()
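MapGPA describes the range as a start GPA plus a length (the `.r13 = end - start` above), and when the target state is shared the GPAs are advertised with the shared bit set via cc_mkdec(). On a partial failure the VMM reports the first GPA that did not convert, which is what the retry check above validates before continuing. A sketch of just the argument preparation; the struct and helper names are hypothetical:

/* Illustrative only: how the MapGPA range arguments are formed. */
struct map_gpa_args {
	u64 gpa;	/* start GPA, shared bit set when converting to shared */
	u64 size;	/* length of the range in bytes */
};

static struct map_gpa_args example_map_gpa_args(phys_addr_t start,
						phys_addr_t end, bool enc)
{
	if (!enc) {
		/* Converting to shared: advertise GPAs with the shared bit */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	return (struct map_gpa_args){ .gpa = start, .size = end - start };
}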
791 * Inform the VMM of the guest's intent for this physical page: shared with
793 * of the page in response.
798 phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE); in tdx_enc_status_changed() local
800 if (!tdx_map_gpa(start, end, enc)) in tdx_enc_status_changed()
803 /* shared->private conversion requires memory to be accepted before use */ in tdx_enc_status_changed()
805 return tdx_accept_memory(start, end); in tdx_enc_status_changed()
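For a shared->private conversion the MapGPA call alone is not enough: the newly private pages must also be accepted before their first use, which is why tdx_enc_status_changed() calls tdx_accept_memory() after a successful map. A compressed sketch of that ordering, mirroring the logic above in a single hypothetical helper:

/* Sketch: ordering of a shared->private conversion. */
static bool example_make_private(phys_addr_t start, phys_addr_t end)
{
	/* 1. Tell the VMM the range is now private */
	if (!tdx_map_gpa(start, end, /* enc = */ true))
		return false;

	/* 2. Accept the pages; touching unaccepted private memory is fatal */
	return tdx_accept_memory(start, end);
}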
814 * Only handle shared->private conversion here. in tdx_enc_status_change_prepare()
818 return -EIO; in tdx_enc_status_change_prepare()
827 * Only handle private->shared conversion here. in tdx_enc_status_change_finish()
831 return -EIO; in tdx_enc_status_change_finish()
841 /* Stop new private<->shared conversions */
854 pr_warn("Failed to stop shared<->private conversions\n"); in tdx_kexec_begin()
860 unsigned long addr, end; in tdx_kexec_finish() local
869 end = PAGE_OFFSET + get_max_mapped(); in tdx_kexec_finish()
871 while (addr < end) { in tdx_kexec_finish()
884 * conversion to shared. in tdx_kexec_finish()
901 * a pre-reserved memory range that is always private. in tdx_kexec_finish()
911 pr_err("Failed to unshare range %#lx-%#lx\n", in tdx_kexec_finish()
934 .r9 = -1ULL, in tdx_early_init()
958 * as a flag, not as part of the physical address. in tdx_early_init()
962 physical_mask &= cc_mask - 1; in tdx_early_init()
970 * - Private mapping => Shared Page == Guest shutdown in tdx_early_init()
971 * - Shared mapping => Private Page == Recoverable #VE in tdx_early_init()
997 * Intel-TDX has a secure RDMSR hypercall, but that needs to be in tdx_early_init()
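Once the shared bit is known, it and everything above it are stripped from physical_mask so page table helpers never treat the shared bit as address bits; since cc_mask is a single power of two, `cc_mask - 1` is exactly the mask of all lower address bits, which is what the `physical_mask &= cc_mask - 1` line above relies on. A standalone sketch of that arithmetic with example values:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* Example values: 52 physical address bits, shared bit at bit 47 */
	uint64_t physical_mask = (1ULL << 52) - 1;
	uint64_t cc_mask = 1ULL << 47;

	/* Keep only bits below the shared bit, as tdx_early_init() does */
	physical_mask &= cc_mask - 1;

	assert(physical_mask == (1ULL << 47) - 1);
	return 0;
}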