Lines matching "post-processing"

1 // SPDX-License-Identifier: GPL-2.0
7 * This file is not compiled stand-alone. It contains code shared
8 * between the pre-decompression boot code and the running Linux kernel
9 * and is included directly into both code-bases.
31 * non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
48 /* I/O parameters for CPUID-related helpers */
76 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
101 * These will be initialized based on CPUID table so that non-present
102 * all-zero leaves (for sparse tables) can be differentiated from
103 * invalid/out-of-range leaves. This is needed since all-zero leaves
104 * still need to be post-processed.
113 error("RDRAND instruction not supported - no trusted source of randomness available\n"); in sev_es_check_cpu_features()
195 ghcb->save.sw_exit_code = 0; in vc_ghcb_invalidate()
196 __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); in vc_ghcb_invalidate()
213 ctxt->regs = regs; in vc_init_em_ctxt()
223 ctxt->regs->ip += ctxt->insn.length; in vc_finish_insn()
230 ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0); in verify_exception_info()
235 u64 info = ghcb->save.sw_exit_info_2; in verify_exception_info()
242 ctxt->fi.vector = v; in verify_exception_info()
245 ctxt->fi.error_code = info >> 32; in verify_exception_info()
256 switch (call->rax_out) { in svsm_process_result_codes()
261 return -EAGAIN; in svsm_process_result_codes()
263 return -EINVAL; in svsm_process_result_codes()
269 * - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
270 * - Set the CA call pending field to 1
271 * - Issue VMGEXIT
272 * - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
273 * - Perform atomic exchange of the CA call pending field
275 * - See the "Secure VM Service Module for SEV-SNP Guests" specification for
277 * - The calling convention loosely follows the Microsoft X64 calling
279 * - RAX specifies the SVSM protocol/callid as input and the return code
284 register unsigned long rax asm("rax") = call->rax; in svsm_issue_call()
285 register unsigned long rcx asm("rcx") = call->rcx; in svsm_issue_call()
286 register unsigned long rdx asm("rdx") = call->rdx; in svsm_issue_call()
287 register unsigned long r8 asm("r8") = call->r8; in svsm_issue_call()
288 register unsigned long r9 asm("r9") = call->r9; in svsm_issue_call()
290 call->caa->call_pending = 1; in svsm_issue_call()
296 *pending = xchg(&call->caa->call_pending, *pending); in svsm_issue_call()
298 call->rax_out = rax; in svsm_issue_call()
299 call->rcx_out = rcx; in svsm_issue_call()
300 call->rdx_out = rdx; in svsm_issue_call()
301 call->r8_out = r8; in svsm_issue_call()
302 call->r9_out = r9; in svsm_issue_call()
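
The svsm_issue_call() lines above implement the sequence described in the calling-convention comment: load the call registers, set the calling-area pending flag, VMGEXIT into the SVSM, capture the returned registers, then atomically exchange the pending flag so the caller can tell whether the SVSM actually consumed the request. Below is a minimal user-space sketch of that handshake with the VMGEXIT stubbed out; everything prefixed demo_ (and the stub itself) is invented for illustration, only the field layout mirrors the listing.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SVSM_SUCCESS 0  /* success code, as in svsm_process_result_codes() */

    struct demo_caa { uint8_t call_pending; };

    struct demo_svsm_call {
        struct demo_caa *caa;
        uint64_t rax, rcx, rdx, r8, r9;
        uint64_t rax_out, rcx_out, rdx_out, r8_out, r9_out;
    };

    /* Stand-in for VMGEXIT: pretend the SVSM handled the call and cleared
     * the pending flag, as a real SVSM would. */
    static void demo_vmgexit(struct demo_svsm_call *call)
    {
        call->rax_out = DEMO_SVSM_SUCCESS;
        call->caa->call_pending = 0;
    }

    static int demo_issue_call(struct demo_svsm_call *call, uint8_t *pending)
    {
        call->caa->call_pending = 1;                      /* 1) mark the call pending */
        demo_vmgexit(call);                               /* 2) trap to the SVSM */
        *pending = __atomic_exchange_n(&call->caa->call_pending, *pending,
                                       __ATOMIC_SEQ_CST); /* 3) old flag back to caller */

        switch (call->rax_out) {                          /* 4) map the SVSM result code */
        case DEMO_SVSM_SUCCESS:
            return 0;
        /* incomplete/busy results would map to -EAGAIN here */
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        struct demo_caa caa = { 0 };
        struct demo_svsm_call call = { .caa = &caa };     /* .rax would carry the protocol/call id */
        uint8_t pending = 0;

        int ret = demo_issue_call(&call, &pending);

        /* A non-zero value left in pending means the SVSM never processed the call. */
        printf("ret=%d pending=%u\n", ret, pending);
        return 0;
    }
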
325 return -EINVAL; in svsm_perform_msr_protocol()
328 return -EINVAL; in svsm_perform_msr_protocol()
331 return -EINVAL; in svsm_perform_msr_protocol()
345 * in the boot, so use rip-relative references as needed. in svsm_perform_ghcb_protocol()
347 ghcb->protocol_version = RIP_REL_REF(ghcb_version); in svsm_perform_ghcb_protocol()
348 ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; in svsm_perform_ghcb_protocol()
359 return -EINVAL; in svsm_perform_ghcb_protocol()
368 return -EINVAL; in svsm_perform_ghcb_protocol()
380 ghcb->protocol_version = ghcb_version; in sev_es_ghcb_hv_call()
381 ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; in sev_es_ghcb_hv_call()
401 return -EIO; in __sev_cpuid_hv()
413 * MSR protocol does not support fetching non-zero subfunctions, but is in __sev_cpuid_hv_msr()
414 * sufficient to handle current early-boot cases. Should that change, in __sev_cpuid_hv_msr()
417 * can be added here to use GHCB-page protocol for cases that occur late in __sev_cpuid_hv_msr()
420 if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn) in __sev_cpuid_hv_msr()
421 return -EINVAL; in __sev_cpuid_hv_msr()
423 ret = __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax); in __sev_cpuid_hv_msr()
424 ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx); in __sev_cpuid_hv_msr()
425 ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx); in __sev_cpuid_hv_msr()
426 ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx); in __sev_cpuid_hv_msr()
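
Each call in the chain above asks the hypervisor for one output register of one CPUID leaf over the GHCB MSR protocol. The sketch below shows how such a request value can be packed; the bit layout (request code 0x004 in the low 12 bits, register selector in bits 31:30, CPUID function in bits 63:32) follows the GHCB specification as I read it and the names are placeholders, so check asm/sev-common.h for the authoritative macros.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MSR_CPUID_REQ   0x004ULL   /* GHCBInfo: CPUID request  */
    #define DEMO_MSR_CPUID_RESP  0x005ULL   /* GHCBInfo: CPUID response */
    #define DEMO_CPUID_REQ_EAX   0ULL
    #define DEMO_CPUID_REQ_EBX   1ULL
    #define DEMO_CPUID_REQ_ECX   2ULL
    #define DEMO_CPUID_REQ_EDX   3ULL

    /* Pack one register request: function in bits 63:32, register selector
     * in bits 31:30, request code in the low 12 bits. */
    static uint64_t demo_cpuid_req(uint32_t fn, uint64_t reg)
    {
        return DEMO_MSR_CPUID_REQ | ((reg & 3) << 30) | ((uint64_t)fn << 32);
    }

    int main(void)
    {
        uint64_t req = demo_cpuid_req(0x8000001f, DEMO_CPUID_REQ_EBX);

        printf("GHCB MSR request: %#llx\n", (unsigned long long)req);

        /* A reply is only usable if the low 12 bits carry the response code;
         * the requested register value then sits in bits 63:32. */
        uint64_t resp = DEMO_MSR_CPUID_RESP | ((uint64_t)0x12345678 << 32);
        if ((resp & 0xfff) == DEMO_MSR_CPUID_RESP)
            printf("EBX = %#x\n", (uint32_t)(resp >> 32));
        return 0;
    }
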
436 ghcb_set_rax(ghcb, leaf->fn); in __sev_cpuid_hv_ghcb()
437 ghcb_set_rcx(ghcb, leaf->subfn); in __sev_cpuid_hv_ghcb()
443 /* xgetbv will cause #UD - use reset value for xcr0 */ in __sev_cpuid_hv_ghcb()
456 leaf->eax = ghcb->save.rax; in __sev_cpuid_hv_ghcb()
457 leaf->ebx = ghcb->save.rbx; in __sev_cpuid_hv_ghcb()
458 leaf->ecx = ghcb->save.rcx; in __sev_cpuid_hv_ghcb()
459 leaf->edx = ghcb->save.rdx; in __sev_cpuid_hv_ghcb()
472 * mapping. Use RIP-relative addressing to obtain the correct address
474 * switch-over to kernel virtual addresses later.
508 for (i = 0; i < cpuid_table->count; i++) { in snp_cpuid_calc_xsave_size()
509 const struct snp_cpuid_fn *e = &cpuid_table->fn[i]; in snp_cpuid_calc_xsave_size()
511 if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64)) in snp_cpuid_calc_xsave_size()
513 if (!(xfeatures_en & (BIT_ULL(e->ecx_in)))) in snp_cpuid_calc_xsave_size()
515 if (xfeatures_found & (BIT_ULL(e->ecx_in))) in snp_cpuid_calc_xsave_size()
518 xfeatures_found |= (BIT_ULL(e->ecx_in)); in snp_cpuid_calc_xsave_size()
521 xsave_size += e->eax; in snp_cpuid_calc_xsave_size()
523 xsave_size = max(xsave_size, e->eax + e->ebx); in snp_cpuid_calc_xsave_size()
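
The snp_cpuid_calc_xsave_size() lines above walk the CPUID-table entries for leaf 0xD, sub-leaves 2-63, counting only the xfeatures that are enabled: with the compacted format the per-component sizes (EAX) are summed, otherwise the result is the largest offset-plus-size (EBX + EAX). A self-contained version of that arithmetic over an invented two-entry table is sketched below; the 576-byte baseline (legacy area plus XSAVE header) is an assumption carried over from the kernel helper.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for one CPUID-table entry of leaf 0xD, sub-leaf n. */
    struct demo_xsave_ent {
        uint32_t ecx_in;   /* sub-leaf == xfeature number */
        uint32_t eax;      /* size of the state component */
        uint32_t ebx;      /* offset in the standard (non-compacted) format */
    };

    static uint32_t demo_xsave_size(const struct demo_xsave_ent *e, int n,
                                    uint64_t xfeatures_en, int compacted)
    {
        uint32_t size = 576;   /* assumed baseline: legacy area + XSAVE header */
        uint64_t seen = 0;

        for (int i = 0; i < n; i++, e++) {
            if (e->ecx_in <= 1 || e->ecx_in >= 64)
                continue;                               /* only sub-leaves 2..63 */
            if (!(xfeatures_en & (1ULL << e->ecx_in)))
                continue;                               /* feature not enabled */
            if (seen & (1ULL << e->ecx_in))
                continue;                               /* ignore duplicate entries */
            seen |= 1ULL << e->ecx_in;

            if (compacted)
                size += e->eax;                         /* components packed back to back */
            else if (size < e->eax + e->ebx)
                size = e->eax + e->ebx;                 /* end of the highest component */
        }
        return size;
    }

    int main(void)
    {
        /* Invented example: xfeature 2 right after the header, xfeature 3
         * with a 64-byte alignment gap in the standard layout. */
        struct demo_xsave_ent tbl[] = {
            { .ecx_in = 2, .eax = 256, .ebx = 576 },
            { .ecx_in = 3, .eax =  64, .ebx = 896 },
        };
        uint64_t xfeat = (1ULL << 0) | (1ULL << 1) | (1ULL << 2) | (1ULL << 3);

        printf("standard: %u bytes, compacted: %u bytes\n",
               demo_xsave_size(tbl, 2, xfeat, 0),
               demo_xsave_size(tbl, 2, xfeat, 1));
        return 0;
    }
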
543 for (i = 0; i < cpuid_table->count; i++) { in snp_cpuid_get_validated_func()
544 const struct snp_cpuid_fn *e = &cpuid_table->fn[i]; in snp_cpuid_get_validated_func()
546 if (e->eax_in != leaf->fn) in snp_cpuid_get_validated_func()
549 if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn) in snp_cpuid_get_validated_func()
558 if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1)) in snp_cpuid_get_validated_func()
559 if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in) in snp_cpuid_get_validated_func()
562 leaf->eax = e->eax; in snp_cpuid_get_validated_func()
563 leaf->ebx = e->ebx; in snp_cpuid_get_validated_func()
564 leaf->ecx = e->ecx; in snp_cpuid_get_validated_func()
565 leaf->edx = e->edx; in snp_cpuid_get_validated_func()
584 switch (leaf->fn) { in snp_cpuid_postprocess()
589 leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0)); in snp_cpuid_postprocess()
591 leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9)); in snp_cpuid_postprocess()
595 leaf->ecx |= BIT(27); in snp_cpuid_postprocess()
599 leaf->ecx &= ~BIT(4); in snp_cpuid_postprocess()
601 leaf->ecx |= BIT(4); in snp_cpuid_postprocess()
608 leaf->edx = leaf_hv.edx; in snp_cpuid_postprocess()
615 if (leaf->subfn != 0 && leaf->subfn != 1) in snp_cpuid_postprocess()
620 if (leaf->subfn == 1) { in snp_cpuid_postprocess()
622 if (leaf->eax & BIT(3)) { in snp_cpuid_postprocess()
634 * bit 3) since SNP-capable hardware has these feature in snp_cpuid_postprocess()
639 if (!(leaf->eax & (BIT(1) | BIT(3)))) in snp_cpuid_postprocess()
640 return -EINVAL; in snp_cpuid_postprocess()
647 return -EINVAL; in snp_cpuid_postprocess()
649 leaf->ebx = xsave_size; in snp_cpuid_postprocess()
656 leaf->eax = leaf_hv.eax; in snp_cpuid_postprocess()
658 leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0)); in snp_cpuid_postprocess()
660 leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0)); in snp_cpuid_postprocess()
663 /* No fix-ups needed, use values as-is. */ in snp_cpuid_postprocess()
671 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
679 if (!cpuid_table->count) in snp_cpuid()
680 return -EOPNOTSUPP; in snp_cpuid()
686 * same as out-of-range values (all-zero). This is useful here in snp_cpuid()
691 * out-of-range entries and in-range zero entries, since the in snp_cpuid()
694 * CPU-specific information during post-processing. So if it's in snp_cpuid()
696 * within a valid CPUID range, proceed with post-processing in snp_cpuid()
698 * post-processing and just return zeros immediately. in snp_cpuid()
700 leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0; in snp_cpuid()
702 /* Skip post-processing for out-of-range zero leafs. */ in snp_cpuid()
703 if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) || in snp_cpuid()
704 (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) || in snp_cpuid()
705 (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max)))) in snp_cpuid()
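
The range check above is what separates "in range but genuinely all-zero" leaves, which still go through post-processing, from out-of-range leaves that can simply return zeros. The same three ranges, standard (0x0...), hypervisor (0x40000000...) and extended (0x80000000...), are captured in the small predicate below; the maxima are placeholders, the real ones are read from the SNP CPUID table during setup_cpuid_table().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder maxima; the real values come out of the SNP CPUID table. */
    static uint32_t std_range_max = 0x0000001f;
    static uint32_t hyp_range_max = 0x40000001;
    static uint32_t ext_range_max = 0x8000001f;

    /* Mirrors the in-range test: only leaves inside one of the three ranges
     * are handed to post-processing; everything else stays all-zero. */
    static bool demo_leaf_in_range(uint32_t fn)
    {
        return fn <= std_range_max ||
               (fn >= 0x40000000 && fn <= hyp_range_max) ||
               (fn >= 0x80000000 && fn <= ext_range_max);
    }

    int main(void)
    {
        printf("0x0000000d -> %d\n", demo_leaf_in_range(0x0000000d)); /* 1: standard range   */
        printf("0x40000002 -> %d\n", demo_leaf_in_range(0x40000002)); /* 0: past the hyp max */
        printf("0x8000001f -> %d\n", demo_leaf_in_range(0x8000001f)); /* 1: extended range   */
        return 0;
    }
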
713 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
715 * hypervisor and only the CPUID exit-code.
719 unsigned int subfn = lower_bits(regs->cx, 32); in do_vc_no_ghcb()
720 unsigned int fn = lower_bits(regs->ax, 32); in do_vc_no_ghcb()
721 u16 opcode = *(unsigned short *)regs->ip; in do_vc_no_ghcb()
740 if (ret != -EOPNOTSUPP) in do_vc_no_ghcb()
747 regs->ax = leaf.eax; in do_vc_no_ghcb()
748 regs->bx = leaf.ebx; in do_vc_no_ghcb()
749 regs->cx = leaf.ecx; in do_vc_no_ghcb()
750 regs->dx = leaf.edx; in do_vc_no_ghcb()
753 * This is a VC handler and the #VC is only raised when SEV-ES is in do_vc_no_ghcb()
756 * into the no-sev path. This could map sensitive data unencrypted and in do_vc_no_ghcb()
760 * - Availability of CPUID leaf 0x8000001f in do_vc_no_ghcb()
761 * - SEV CPUID bit. in do_vc_no_ghcb()
763 * The hypervisor might still report the wrong C-bit position, but this in do_vc_no_ghcb()
767 if (fn == 0x80000000 && (regs->ax < 0x8000001f)) in do_vc_no_ghcb()
770 else if ((fn == 0x8000001f && !(regs->ax & BIT(1)))) in do_vc_no_ghcb()
774 /* Skip over the CPUID two-byte opcode */ in do_vc_no_ghcb()
775 regs->ip += 2; in do_vc_no_ghcb()
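
The sanity check above only trusts the hypervisor-provided CPUID far enough to confirm that leaf 0x8000001f exists and that its SEV bit is set before committing to the SEV code paths. The same two checks can be made from user space with GCC's <cpuid.h>, as sketched below; the bit positions (EAX bit 1 = SEV, EBX bits 5:0 = C-bit location) are taken from AMD's CPUID documentation and should be verified against the APM.

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x8000001f must be reported at all... */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x8000001f) {
            puts("CPUID leaf 0x8000001f not available");
            return 1;
        }

        /* ...and its SEV bit must be set. */
        __cpuid(0x8000001f, eax, ebx, ecx, edx);
        if (!(eax & (1u << 1))) {
            puts("SEV not reported by CPUID");
            return 1;
        }

        /* EBX bits 5:0 carry the C-bit position on SEV-capable parts. */
        printf("SEV reported, C-bit position %u\n", ebx & 0x3f);
        return 0;
    }
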
788 if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) { in vc_insn_string_check()
789 ctxt->fi.vector = X86_TRAP_PF; in vc_insn_string_check()
790 ctxt->fi.error_code = X86_PF_USER; in vc_insn_string_check()
791 ctxt->fi.cr2 = address; in vc_insn_string_check()
793 ctxt->fi.error_code |= X86_PF_WRITE; in vc_insn_string_check()
807 int i, b = backwards ? -1 : 1; in vc_insn_string_read()
833 int i, s = backwards ? -1 : 1; in vc_insn_string_write()
874 struct insn *insn = &ctxt->insn; in vc_ioio_exitinfo()
880 switch (insn->opcode.bytes[0]) { in vc_ioio_exitinfo()
886 port = ctxt->regs->dx & 0xffff; in vc_ioio_exitinfo()
894 port = ctxt->regs->dx & 0xffff; in vc_ioio_exitinfo()
901 port = (u8)insn->immediate.value & 0xffff; in vc_ioio_exitinfo()
908 port = (u8)insn->immediate.value & 0xffff; in vc_ioio_exitinfo()
915 port = ctxt->regs->dx & 0xffff; in vc_ioio_exitinfo()
922 port = ctxt->regs->dx & 0xffff; in vc_ioio_exitinfo()
931 switch (insn->opcode.bytes[0]) { in vc_ioio_exitinfo()
944 *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 in vc_ioio_exitinfo()
946 size = (insn->opnd_bytes == 2) ? 2 : 4; in vc_ioio_exitinfo()
949 switch (insn->addr_bytes) { in vc_ioio_exitinfo()
969 struct pt_regs *regs = ctxt->regs; in vc_handle_ioio()
981 bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); in vc_handle_ioio()
990 * has a chance to take interrupts and re-schedule while the in vc_handle_ioio()
994 ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; in vc_handle_ioio()
996 op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1; in vc_handle_ioio()
1000 es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); in vc_handle_ioio()
1005 (void *)(es_base + regs->si), in vc_handle_ioio()
1006 ghcb->shared_buffer, io_bytes, in vc_handle_ioio()
1027 (void *)(es_base + regs->di), in vc_handle_ioio()
1028 ghcb->shared_buffer, io_bytes, in vc_handle_ioio()
1034 regs->di -= exit_bytes; in vc_handle_ioio()
1036 regs->di += exit_bytes; in vc_handle_ioio()
1039 regs->si -= exit_bytes; in vc_handle_ioio()
1041 regs->si += exit_bytes; in vc_handle_ioio()
1045 regs->cx -= exit_info_2; in vc_handle_ioio()
1047 ret = regs->cx ? ES_RETRY : ES_OK; in vc_handle_ioio()
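
For string I/O the handler above works through the REP count in chunks that fit the GHCB shared buffer: each pass handles at most ghcb_count elements, RCX is reduced by the number actually completed (exit_info_2), and ES_RETRY is returned until RCX reaches zero so the guest can take interrupts between passes. The loop structure is sketched below; the 2032-byte buffer size is an assumption about the GHCB shared-buffer size and the copy itself is elided.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SHARED_BUF_SIZE 2032   /* stand-in for the GHCB shared buffer */

    /* Process a REP OUTS-style operation in shared-buffer-sized chunks,
     * mirroring the retry loop above: each pass handles at most ghcb_count
     * elements and the caller retries while the count is non-zero. */
    static void demo_rep_io(uint64_t *rcx, unsigned int io_bytes)
    {
        unsigned int ghcb_count = DEMO_SHARED_BUF_SIZE / io_bytes;
        int pass = 0;

        while (*rcx) {
            uint64_t exit_info_2 = *rcx < ghcb_count ? *rcx : ghcb_count;

            /* ... copy exit_info_2 * io_bytes bytes via the shared buffer ... */
            *rcx -= exit_info_2;
            printf("pass %d: handled %llu elements, %llu left\n",
                   ++pass, (unsigned long long)exit_info_2,
                   (unsigned long long)*rcx);
        }
    }

    int main(void)
    {
        uint64_t rcx = 1200;        /* REP count */
        demo_rep_io(&rcx, 2);       /* 16-bit ports: 1016 elements per pass */
        return 0;
    }
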
1057 rax = lower_bits(regs->ax, bits); in vc_handle_ioio()
1068 regs->ax = lower_bits(ghcb->save.rax, bits); in vc_handle_ioio()
1077 struct pt_regs *regs = ctxt->regs; in vc_handle_cpuid_snp()
1081 leaf.fn = regs->ax; in vc_handle_cpuid_snp()
1082 leaf.subfn = regs->cx; in vc_handle_cpuid_snp()
1085 regs->ax = leaf.eax; in vc_handle_cpuid_snp()
1086 regs->bx = leaf.ebx; in vc_handle_cpuid_snp()
1087 regs->cx = leaf.ecx; in vc_handle_cpuid_snp()
1088 regs->dx = leaf.edx; in vc_handle_cpuid_snp()
1097 struct pt_regs *regs = ctxt->regs; in vc_handle_cpuid()
1105 if (snp_cpuid_ret != -EOPNOTSUPP) in vc_handle_cpuid()
1108 ghcb_set_rax(ghcb, regs->ax); in vc_handle_cpuid()
1109 ghcb_set_rcx(ghcb, regs->cx); in vc_handle_cpuid()
1115 /* xgetbv will cause #GP - use reset value for xcr0 */ in vc_handle_cpuid()
1128 regs->ax = ghcb->save.rax; in vc_handle_cpuid()
1129 regs->bx = ghcb->save.rbx; in vc_handle_cpuid()
1130 regs->cx = ghcb->save.rcx; in vc_handle_cpuid()
1131 regs->dx = ghcb->save.rdx; in vc_handle_cpuid()
1151 ctxt->regs->ax = ghcb->save.rax; in vc_handle_rdtsc()
1152 ctxt->regs->dx = ghcb->save.rdx; in vc_handle_rdtsc()
1154 ctxt->regs->cx = ghcb->save.rcx; in vc_handle_rdtsc()
1174 hdr = (struct setup_data *)bp->hdr.setup_data; in find_cc_blob_setup_data()
1177 if (hdr->type == SETUP_CC_BLOB) { in find_cc_blob_setup_data()
1179 return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address; in find_cc_blob_setup_data()
1181 hdr = (struct setup_data *)hdr->next; in find_cc_blob_setup_data()
1201 if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE) in setup_cpuid_table()
1204 cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys; in setup_cpuid_table()
1205 if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX) in setup_cpuid_table()
1211 /* Initialize CPUID ranges for range-checking. */ in setup_cpuid_table()
1212 for (i = 0; i < cpuid_table->count; i++) { in setup_cpuid_table()
1213 const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; in setup_cpuid_table()
1215 if (fn->eax_in == 0x0) in setup_cpuid_table()
1216 RIP_REL_REF(cpuid_std_range_max) = fn->eax; in setup_cpuid_table()
1217 else if (fn->eax_in == 0x40000000) in setup_cpuid_table()
1218 RIP_REL_REF(cpuid_hyp_range_max) = fn->eax; in setup_cpuid_table()
1219 else if (fn->eax_in == 0x80000000) in setup_cpuid_table()
1220 RIP_REL_REF(cpuid_ext_range_max) = fn->eax; in setup_cpuid_table()
1239 pfn = pc->entry[pc->cur_index].pfn; in svsm_pval_terminate()
1240 action = pc->entry[pc->cur_index].action; in svsm_pval_terminate()
1241 page_size = pc->entry[pc->cur_index].page_size; in svsm_pval_terminate()
1262 pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; in svsm_pval_4k_page()
1265 pc->num_entries = 1; in svsm_pval_4k_page()
1266 pc->cur_index = 0; in svsm_pval_4k_page()
1267 pc->entry[0].page_size = RMP_PG_SIZE_4K; in svsm_pval_4k_page()
1268 pc->entry[0].action = validate; in svsm_pval_4k_page()
1269 pc->entry[0].ignore_cf = 0; in svsm_pval_4k_page()
1270 pc->entry[0].pfn = paddr >> PAGE_SHIFT; in svsm_pval_4k_page()
1288 * This can be called very early during boot, so use rIP-relative in pvalidate_4k_page()
1310 for (i = 0; i <= desc->hdr.end_entry; i++) { in pval_pages()
1311 e = &desc->entries[i]; in pval_pages()
1313 pfn = e->gfn; in pval_pages()
1315 size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; in pval_pages()
1316 validate = e->operation == SNP_PAGE_STATE_PRIVATE; in pval_pages()
1342 pc->num_entries = 0; in svsm_build_ca_from_pfn_range()
1343 pc->cur_index = 0; in svsm_build_ca_from_pfn_range()
1345 pe = &pc->entry[0]; in svsm_build_ca_from_pfn_range()
1348 pe->page_size = RMP_PG_SIZE_4K; in svsm_build_ca_from_pfn_range()
1349 pe->action = action; in svsm_build_ca_from_pfn_range()
1350 pe->ignore_cf = 0; in svsm_build_ca_from_pfn_range()
1351 pe->pfn = pfn; in svsm_build_ca_from_pfn_range()
1356 pc->num_entries++; in svsm_build_ca_from_pfn_range()
1357 if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) in svsm_build_ca_from_pfn_range()
1371 pc->num_entries = 0; in svsm_build_ca_from_psc_desc()
1372 pc->cur_index = 0; in svsm_build_ca_from_psc_desc()
1374 pe = &pc->entry[0]; in svsm_build_ca_from_psc_desc()
1375 e = &desc->entries[desc_entry]; in svsm_build_ca_from_psc_desc()
1377 while (desc_entry <= desc->hdr.end_entry) { in svsm_build_ca_from_psc_desc()
1378 pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; in svsm_build_ca_from_psc_desc()
1379 pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; in svsm_build_ca_from_psc_desc()
1380 pe->ignore_cf = 0; in svsm_build_ca_from_psc_desc()
1381 pe->pfn = e->gfn; in svsm_build_ca_from_psc_desc()
1387 pc->num_entries++; in svsm_build_ca_from_psc_desc()
1388 if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) in svsm_build_ca_from_psc_desc()
1413 * The SVSM calling area (CA) can support processing 510 entries at a in svsm_pval_pages()
1421 pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; in svsm_pval_pages()
1428 for (i = 0; i <= desc->hdr.end_entry;) { in svsm_pval_pages()
1443 pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { in svsm_pval_pages()
1444 /* Save this entry for post-processing at 4K */ in svsm_pval_pages()
1445 pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; in svsm_pval_pages()
1448 pc->cur_index++; in svsm_pval_pages()
1449 if (pc->cur_index < pc->num_entries) in svsm_pval_pages()
1450 ret = -EAGAIN; in svsm_pval_pages()
1454 } while (ret == -EAGAIN); in svsm_pval_pages()
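
svsm_pval_pages() above batches PVALIDATE entries into the calling area, up to 510 at a time, and when one entry fails it records 2M entries for a later 4K retry, steps cur_index past the offender and re-issues the call with -EAGAIN until the whole batch has been attempted. The resume-from-cur_index pattern, stripped of the SVSM specifics, looks roughly like the sketch below; the batch size, the failure injection and the demo_ names are invented.

    #include <errno.h>
    #include <stdio.h>

    struct demo_batch {
        unsigned int num_entries;
        unsigned int cur_index;
    };

    /* Pretend the service processes entries in order and fails at index 3. */
    static int demo_issue(struct demo_batch *b)
    {
        while (b->cur_index < b->num_entries) {
            if (b->cur_index == 3)
                return -EINVAL;     /* failing entry */
            b->cur_index++;
        }
        return 0;
    }

    int main(void)
    {
        struct demo_batch b = { .num_entries = 8, .cur_index = 0 };
        int ret;

        do {
            ret = demo_issue(&b);
            if (ret) {
                printf("entry %u failed, skipping it\n", b.cur_index);
                /* a 2M entry would be queued here for a 4K retry */
                b.cur_index++;
                ret = b.cur_index < b.num_entries ? -EAGAIN : ret;
            }
        } while (ret == -EAGAIN);

        printf("batch done, ret=%d\n", ret);
        return 0;
    }
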
1497 data = (struct snp_psc_desc *)ghcb->shared_buffer; in vmgexit_psc()
1498 memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc))); in vmgexit_psc()
1502 * before processing all the entries. Check whether all the entries in vmgexit_psc()
1505 * reference the data->hdr everywhere. in vmgexit_psc()
1512 cur_entry = data->hdr.cur_entry; in vmgexit_psc()
1513 end_entry = data->hdr.end_entry; in vmgexit_psc()
1515 while (data->hdr.cur_entry <= data->hdr.end_entry) { in vmgexit_psc()
1525 if (WARN(ret || ghcb->save.sw_exit_info_2, in vmgexit_psc()
1527 ret, ghcb->save.sw_exit_info_2)) { in vmgexit_psc()
1533 if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) { in vmgexit_psc()
1539 * Sanity check that entry processing is not going backwards. in vmgexit_psc()
1542 if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry, in vmgexit_psc()
1543 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n", in vmgexit_psc()
1544 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) { in vmgexit_psc()
1557 unsigned int opcode = (unsigned int)ctxt->insn.opcode.value; in vc_check_opcode_bytes()
1558 u8 modrm = ctxt->insn.modrm.value; in vc_check_opcode_bytes()
1614 X86_MODRM_REG(ctxt->insn.modrm.value) == 7) in vc_check_opcode_bytes()
1626 X86_MODRM_REG(ctxt->insn.modrm.value) == 7) in vc_check_opcode_bytes()
1640 opcode, exit_code, ctxt->regs->ip); in vc_check_opcode_bytes()
1666 * changes of a lesser-privileged VMPL are a don't-care. in svsm_setup_ca()
1668 * Use a rip-relative reference to obtain the proper address, since this in svsm_setup_ca()
1679 if (!cc_info || !cc_info->secrets_phys || cc_info->secrets_len != PAGE_SIZE) in svsm_setup_ca()
1682 secrets_page = (struct snp_secrets_page *)cc_info->secrets_phys; in svsm_setup_ca()
1683 if (!secrets_page->svsm_size) in svsm_setup_ca()
1686 if (!secrets_page->svsm_guest_vmpl) in svsm_setup_ca()
1689 RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl; in svsm_setup_ca()
1691 caa = secrets_page->svsm_caa; in svsm_setup_ca()
1694 * An open-coded PAGE_ALIGNED() in order to avoid including in svsm_setup_ca()
1695 * kernel-proper headers into the decompressor. in svsm_setup_ca()
1697 if (caa & (PAGE_SIZE - 1)) in svsm_setup_ca()
1709 for (i = 0; i < cpuid_table->count; i++) { in svsm_setup_ca()
1710 struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; in svsm_setup_ca()
1712 if (fn->eax_in == 0x8000001f) in svsm_setup_ca()
1713 fn->eax |= BIT(28); in svsm_setup_ca()