author     Ingo Molnar <mingo@kernel.org>   2026-02-05 10:01:42 +0100
committer  Ingo Molnar <mingo@kernel.org>   2026-02-05 10:01:43 +0100
commit     847c9647bbd5593fef92eaaa42c323c26aecb9b1
tree       3839e6581e38c06ef9809a77c6b23471feb1db88
parent     93da75dfdc9d76c1771f884e6447af553c32b862
parent     8c89d3ad3095808ac130c535ad7ed3d1344d5986
# New commits in x86/sev:
8c89d3ad3095 ("x86/sev: Don't emit BSS_DECRYPTED section unless it is in use")
af05e558988e ("x86/sev: Use kfree_sensitive() when freeing a SNP message descriptor")
9183c97e7e22 ("x86/sev: Rename sev_es_ghcb_handle_msr() to __vc_handle_msr()")
e21279b73ef6 ("x86/sev: Carve out the SVSM code into a separate compilation unit")
f01c6489ad6c ("x86/sev: Add internal header guards")
c1e8980fabf5 ("x86/sev: Move the internal header")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
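
Note: the af05e558988e change converts an open-coded memset()+kfree() pair in snp_msg_free() to kfree_sensitive() (see the core.c hunk below). A minimal sketch of the pattern follows; struct secret_desc and secret_desc_free() are hypothetical names used purely for illustration, not part of the patched code:

#include <linux/slab.h>

/* Hypothetical container for key material - illustration only. */
struct secret_desc {
        u8 key[32];
};

static void secret_desc_free(struct secret_desc *d)
{
        /*
         * The open-coded variant:
         *
         *      memset(d, 0, sizeof(*d));
         *      kfree(d);
         *
         * is fragile: the compiler may elide a memset() of memory that is
         * freed immediately afterwards. kfree_sensitive() wipes the
         * allocation with memzero_explicit() before freeing it, so the
         * clear cannot be optimized away.
         */
        kfree_sensitive(d);
}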
-rw-r--r--  arch/x86/boot/startup/sev-startup.c                                             |   3
-rw-r--r--  arch/x86/coco/sev/Makefile                                                      |   2
-rw-r--r--  arch/x86/coco/sev/core.c                                                        | 387
-rw-r--r--  arch/x86/coco/sev/internal.h (renamed from arch/x86/include/asm/sev-internal.h) |  34
-rw-r--r--  arch/x86/coco/sev/noinstr.c                                                     |   3
-rw-r--r--  arch/x86/coco/sev/svsm.c                                                        | 362
-rw-r--r--  arch/x86/coco/sev/vc-handle.c                                                   |   7
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S                                                   |  21

8 files changed, 421 insertions(+), 398 deletions(-)
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
index 09725428d3e657..789e99d38d17ba 100644
--- a/arch/x86/boot/startup/sev-startup.c
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -27,7 +27,6 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -41,6 +40,8 @@
 #include <asm/cpuid/api.h>
 #include <asm/cmdline.h>
 
+#include "../../coco/sev/internal.h"
+
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index b2e9ec2f69014f..3111a2ee672218 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-obj-y += core.o noinstr.o vc-handle.o
+obj-y += core.o noinstr.o vc-handle.o svsm.o
 
 # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
 UBSAN_SANITIZE_noinstr.o := n
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 9ae3b11754e655..1b86f48c7e84a0 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -31,7 +31,6 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -46,6 +45,8 @@
 #include <asm/cmdline.h>
 #include <asm/msr.h>
 
+#include "internal.h"
+
 /* Bitmap of SEV features supported by the hypervisor */
 u64 sev_hv_features __ro_after_init;
 SYM_PIC_ALIAS(sev_hv_features);
@@ -54,40 +55,6 @@ SYM_PIC_ALIAS(sev_hv_features);
 u64 sev_secrets_pa __ro_after_init;
 SYM_PIC_ALIAS(sev_secrets_pa);
 
-/* For early boot SVSM communication */
-struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
-SYM_PIC_ALIAS(boot_svsm_ca_page);
-
-/*
- * SVSM related information:
- *   During boot, the page tables are set up as identity mapped and later
- *   changed to use kernel virtual addresses. Maintain separate virtual and
- *   physical addresses for the CAA to allow SVSM functions to be used during
- *   early boot, both with identity mapped virtual addresses and proper kernel
- *   virtual addresses.
- */
-u64 boot_svsm_caa_pa __ro_after_init;
-SYM_PIC_ALIAS(boot_svsm_caa_pa);
-
-DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
-DEFINE_PER_CPU(u64, svsm_caa_pa);
-
-static inline struct svsm_ca *svsm_get_caa(void)
-{
-        if (sev_cfg.use_cas)
-                return this_cpu_read(svsm_caa);
-        else
-                return rip_rel_ptr(&boot_svsm_ca_page);
-}
-
-static inline u64 svsm_get_caa_pa(void)
-{
-        if (sev_cfg.use_cas)
-                return this_cpu_read(svsm_caa_pa);
-        else
-                return boot_svsm_caa_pa;
-}
-
 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
 #define AP_INIT_CS_LIMIT        0xffff
 #define AP_INIT_DS_LIMIT        0xffff
@@ -217,95 +184,6 @@ static u64 __init get_jump_table_addr(void)
         return ret;
 }
 
-static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
-{
-        struct es_em_ctxt ctxt;
-        u8 pending = 0;
-
-        vc_ghcb_invalidate(ghcb);
-
-        /*
-         * Fill in protocol and format specifiers. This can be called very early
-         * in the boot, so use rip-relative references as needed.
-         */
-        ghcb->protocol_version = ghcb_version;
-        ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
-
-        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
-        ghcb_set_sw_exit_info_1(ghcb, 0);
-        ghcb_set_sw_exit_info_2(ghcb, 0);
-
-        sev_es_wr_ghcb_msr(__pa(ghcb));
-
-        svsm_issue_call(call, &pending);
-
-        if (pending)
-                return -EINVAL;
-
-        switch (verify_exception_info(ghcb, &ctxt)) {
-        case ES_OK:
-                break;
-        case ES_EXCEPTION:
-                vc_forward_exception(&ctxt);
-                fallthrough;
-        default:
-                return -EINVAL;
-        }
-
-        return svsm_process_result_codes(call);
-}
-
-static int svsm_perform_call_protocol(struct svsm_call *call)
-{
-        struct ghcb_state state;
-        unsigned long flags;
-        struct ghcb *ghcb;
-        int ret;
-
-        flags = native_local_irq_save();
-
-        if (sev_cfg.ghcbs_initialized)
-                ghcb = __sev_get_ghcb(&state);
-        else if (boot_ghcb)
-                ghcb = boot_ghcb;
-        else
-                ghcb = NULL;
-
-        do {
-                ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
-                           : __pi_svsm_perform_msr_protocol(call);
-        } while (ret == -EAGAIN);
-
-        if (sev_cfg.ghcbs_initialized)
-                __sev_put_ghcb(&state);
-
-        native_local_irq_restore(flags);
-
-        return ret;
-}
-
-static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
-                                    int ret, u64 svsm_ret)
-{
-        WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
-             pfn, action, page_size, ret, svsm_ret);
-
-        sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
-{
-        unsigned int page_size;
-        bool action;
-        u64 pfn;
-
-        pfn = pc->entry[pc->cur_index].pfn;
-        action = pc->entry[pc->cur_index].action;
-        page_size = pc->entry[pc->cur_index].page_size;
-
-        __pval_terminate(pfn, action, page_size, ret, svsm_ret);
-}
-
 static void pval_pages(struct snp_psc_desc *desc)
 {
         struct psc_entry *e;
@@ -342,152 +220,6 @@ static void pval_pages(struct snp_psc_desc *desc)
         }
 }
 
-static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
-                                        struct svsm_pvalidate_call *pc)
-{
-        struct svsm_pvalidate_entry *pe;
-
-        /* Nothing in the CA yet */
-        pc->num_entries = 0;
-        pc->cur_index = 0;
-
-        pe = &pc->entry[0];
-
-        while (pfn < pfn_end) {
-                pe->page_size = RMP_PG_SIZE_4K;
-                pe->action = action;
-                pe->ignore_cf = 0;
-                pe->rsvd = 0;
-                pe->pfn = pfn;
-
-                pe++;
-                pfn++;
-
-                pc->num_entries++;
-                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-                        break;
-        }
-
-        return pfn;
-}
-
-static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
-                                       struct svsm_pvalidate_call *pc)
-{
-        struct svsm_pvalidate_entry *pe;
-        struct psc_entry *e;
-
-        /* Nothing in the CA yet */
-        pc->num_entries = 0;
-        pc->cur_index = 0;
-
-        pe = &pc->entry[0];
-        e = &desc->entries[desc_entry];
-
-        while (desc_entry <= desc->hdr.end_entry) {
-                pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
-                pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
-                pe->ignore_cf = 0;
-                pe->rsvd = 0;
-                pe->pfn = e->gfn;
-
-                pe++;
-                e++;
-
-                desc_entry++;
-                pc->num_entries++;
-                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
-                        break;
-        }
-
-        return desc_entry;
-}
-
-static void svsm_pval_pages(struct snp_psc_desc *desc)
-{
-        struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
-        unsigned int i, pv_4k_count = 0;
-        struct svsm_pvalidate_call *pc;
-        struct svsm_call call = {};
-        unsigned long flags;
-        bool action;
-        u64 pc_pa;
-        int ret;
-
-        /*
-         * This can be called very early in the boot, use native functions in
-         * order to avoid paravirt issues.
-         */
-        flags = native_local_irq_save();
-
-        /*
-         * The SVSM calling area (CA) can support processing 510 entries at a
-         * time. Loop through the Page State Change descriptor until the CA is
-         * full or the last entry in the descriptor is reached, at which time
-         * the SVSM is invoked. This repeats until all entries in the descriptor
-         * are processed.
-         */
-        call.caa = svsm_get_caa();
-
-        pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
-        pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
-        /* Protocol 0, Call ID 1 */
-        call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
-        call.rcx = pc_pa;
-
-        for (i = 0; i <= desc->hdr.end_entry;) {
-                i = svsm_build_ca_from_psc_desc(desc, i, pc);
-
-                do {
-                        ret = svsm_perform_call_protocol(&call);
-                        if (!ret)
-                                continue;
-
-                        /*
-                         * Check if the entry failed because of an RMP mismatch (a
-                         * PVALIDATE at 2M was requested, but the page is mapped in
-                         * the RMP as 4K).
-                         */
-
-                        if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
-                            pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
-                                /* Save this entry for post-processing at 4K */
-                                pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
-                                /* Skip to the next one unless at the end of the list */
-                                pc->cur_index++;
-                                if (pc->cur_index < pc->num_entries)
-                                        ret = -EAGAIN;
-                                else
-                                        ret = 0;
-                        }
-                } while (ret == -EAGAIN);
-
-                if (ret)
-                        svsm_pval_terminate(pc, ret, call.rax_out);
-        }
-
-        /* Process any entries that failed to be validated at 2M and validate them at 4K */
-        for (i = 0; i < pv_4k_count; i++) {
-                u64 pfn, pfn_end;
-
-                action = pv_4k[i].action;
-                pfn = pv_4k[i].pfn;
-                pfn_end = pfn + 512;
-
-                while (pfn < pfn_end) {
-                        pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
-
-                        ret = svsm_perform_call_protocol(&call);
-                        if (ret)
-                                svsm_pval_terminate(pc, ret, call.rax_out);
-                }
-        }
-
-        native_local_irq_restore(flags);
-}
-
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
         struct psc_entry *e;
@@ -1257,7 +989,7 @@ u64 savic_ghcb_msr_read(u32 reg)
         ghcb = __sev_get_ghcb(&state);
 
         vc_ghcb_invalidate(ghcb);
-        res = sev_es_ghcb_handle_msr(ghcb, &ctxt, false);
+        res = __vc_handle_msr(ghcb, &ctxt, false);
         if (res != ES_OK) {
                 pr_err("Secure AVIC MSR (0x%llx) read returned error (%d)\n", msr, res);
                 /* MSR read failures are treated as fatal errors */
@@ -1287,7 +1019,7 @@ void savic_ghcb_msr_write(u32 reg, u64 value)
         ghcb = __sev_get_ghcb(&state);
 
         vc_ghcb_invalidate(ghcb);
-        res = sev_es_ghcb_handle_msr(ghcb, &ctxt, true);
+        res = __vc_handle_msr(ghcb, &ctxt, true);
         if (res != ES_OK) {
                 pr_err("Secure AVIC MSR (0x%llx) write returned error (%d)\n", msr, res);
                 /* MSR writes should never fail. Any failure is fatal error for SNP guest */
@@ -1588,56 +1320,6 @@ static int __init report_snp_info(void)
 }
 arch_initcall(report_snp_info);
 
-static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
-{
-        /* If (new) lengths have been returned, propagate them up */
-        if (call->rcx_out != call->rcx)
-                input->manifest_buf.len = call->rcx_out;
-
-        if (call->rdx_out != call->rdx)
-                input->certificates_buf.len = call->rdx_out;
-
-        if (call->r8_out != call->r8)
-                input->report_buf.len = call->r8_out;
-}
-
-int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
-                              struct svsm_attest_call *input)
-{
-        struct svsm_attest_call *ac;
-        unsigned long flags;
-        u64 attest_call_pa;
-        int ret;
-
-        if (!snp_vmpl)
-                return -EINVAL;
-
-        local_irq_save(flags);
-
-        call->caa = svsm_get_caa();
-
-        ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
-        attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
-
-        *ac = *input;
-
-        /*
-         * Set input registers for the request and set RDX and R8 to known
-         * values in order to detect length values being returned in them.
-         */
-        call->rax = call_id;
-        call->rcx = attest_call_pa;
-        call->rdx = -1;
-        call->r8 = -1;
-        ret = svsm_perform_call_protocol(call);
-        update_attest_input(call, input);
-
-        local_irq_restore(flags);
-
-        return ret;
-}
-EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
-
 static int snp_issue_guest_request(struct snp_guest_req *req)
 {
         struct snp_req_data *input = &req->input;
@@ -1702,64 +1384,6 @@ e_restore_irq:
         return ret;
 }
 
-/**
- * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
- *
- * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
- * which is the only request used so far.
- *
- * Return: true if the platform provides a vTPM SVSM device, false otherwise.
- */
-static bool snp_svsm_vtpm_probe(void)
-{
-        struct svsm_call call = {};
-
-        /* The vTPM device is available only if a SVSM is present */
-        if (!snp_vmpl)
-                return false;
-
-        call.caa = svsm_get_caa();
-        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
-
-        if (svsm_perform_call_protocol(&call))
-                return false;
-
-        /* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
-        return call.rcx_out & BIT_ULL(8);
-}
-
-/**
- * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
- * @buffer: A buffer used to both send the command and receive the response.
- *
- * Execute a SVSM_VTPM_CMD call as defined by
- * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
- *
- * All command request/response buffers have a common structure as specified by
- * the following table:
- *     Byte      Size       In/Out    Description
- *     Offset    (Bytes)
- *     0x000     4          In        Platform command
- *                          Out       Platform command response size
- *
- * Each command can build upon this common request/response structure to create
- * a structure specific to the command. See include/linux/tpm_svsm.h for more
- * details.
- *
- * Return: 0 on success, -errno on failure
- */
-int snp_svsm_vtpm_send_command(u8 *buffer)
-{
-        struct svsm_call call = {};
-
-        call.caa = svsm_get_caa();
-        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
-        call.rcx = __pa(buffer);
-
-        return svsm_perform_call_protocol(&call);
-}
-EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
-
 static struct platform_device sev_guest_device = {
         .name = "sev-guest",
         .id = -1,
@@ -2008,8 +1632,7 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
         free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
         iounmap((__force void __iomem *)mdesc->secrets);
 
-        memset(mdesc, 0, sizeof(*mdesc));
-        kfree(mdesc);
+        kfree_sensitive(mdesc);
 }
 EXPORT_SYMBOL_GPL(snp_msg_free);
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/coco/sev/internal.h
index c58c47c68ab6fe..b1d0c66a651a60 100644
--- a/arch/x86/include/asm/sev-internal.h
+++ b/arch/x86/coco/sev/internal.h
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __X86_COCO_SEV_INTERNAL_H__
+#define __X86_COCO_SEV_INTERNAL_H__
 
 #define DR7_RESET_VALUE        0x400
 
@@ -64,6 +66,9 @@ extern u64 boot_svsm_caa_pa;
 
 enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
 void vc_forward_exception(struct es_em_ctxt *ctxt);
+void svsm_pval_pages(struct snp_psc_desc *desc);
+int svsm_perform_call_protocol(struct svsm_call *call);
+bool snp_svsm_vtpm_probe(void);
 
 static inline u64 sev_es_rd_ghcb_msr(void)
 {
@@ -80,8 +85,35 @@ static __always_inline void sev_es_wr_ghcb_msr(u64 val)
         native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 }
 
-enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);
+enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);
 
 u64 get_hv_features(void);
 
 const struct snp_cpuid_table *snp_cpuid_get_table(void);
+
+static inline struct svsm_ca *svsm_get_caa(void)
+{
+        if (sev_cfg.use_cas)
+                return this_cpu_read(svsm_caa);
+        else
+                return rip_rel_ptr(&boot_svsm_ca_page);
+}
+
+static inline u64 svsm_get_caa_pa(void)
+{
+        if (sev_cfg.use_cas)
+                return this_cpu_read(svsm_caa_pa);
+        else
+                return boot_svsm_caa_pa;
+}
+
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+                                    int ret, u64 svsm_ret)
+{
+        WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+             pfn, action, page_size, ret, svsm_ret);
+
+        sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+}
+
+#endif /* __X86_COCO_SEV_INTERNAL_H__ */
diff --git a/arch/x86/coco/sev/noinstr.c b/arch/x86/coco/sev/noinstr.c
index b527eafb631235..9d94aca4a698f6 100644
--- a/arch/x86/coco/sev/noinstr.c
+++ b/arch/x86/coco/sev/noinstr.c
@@ -16,7 +16,8 @@
 #include <asm/msr.h>
 #include <asm/ptrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
+
+#include "internal.h"
 
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
diff --git a/arch/x86/coco/sev/svsm.c b/arch/x86/coco/sev/svsm.c
new file mode 100644
index 00000000000000..2acf4a76afe7a9
--- /dev/null
+++ b/arch/x86/coco/sev/svsm.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SVSM support code
+ */
+
+#include <linux/types.h>
+
+#include <asm/sev.h>
+
+#include "internal.h"
+
+/* For early boot SVSM communication */
+struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);
+SYM_PIC_ALIAS(boot_svsm_ca_page);
+
+/*
+ * SVSM related information:
+ *   During boot, the page tables are set up as identity mapped and later
+ *   changed to use kernel virtual addresses. Maintain separate virtual and
+ *   physical addresses for the CAA to allow SVSM functions to be used during
+ *   early boot, both with identity mapped virtual addresses and proper kernel
+ *   virtual addresses.
+ */
+u64 boot_svsm_caa_pa __ro_after_init;
+SYM_PIC_ALIAS(boot_svsm_caa_pa);
+
+DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
+DEFINE_PER_CPU(u64, svsm_caa_pa);
+
+static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
+{
+        struct es_em_ctxt ctxt;
+        u8 pending = 0;
+
+        vc_ghcb_invalidate(ghcb);
+
+        /*
+         * Fill in protocol and format specifiers. This can be called very early
+         * in the boot, so use rip-relative references as needed.
+         */
+        ghcb->protocol_version = ghcb_version;
+        ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
+        ghcb_set_sw_exit_info_1(ghcb, 0);
+        ghcb_set_sw_exit_info_2(ghcb, 0);
+
+        sev_es_wr_ghcb_msr(__pa(ghcb));
+
+        svsm_issue_call(call, &pending);
+
+        if (pending)
+                return -EINVAL;
+
+        switch (verify_exception_info(ghcb, &ctxt)) {
+        case ES_OK:
+                break;
+        case ES_EXCEPTION:
+                vc_forward_exception(&ctxt);
+                fallthrough;
+        default:
+                return -EINVAL;
+        }
+
+        return svsm_process_result_codes(call);
+}
+
+int svsm_perform_call_protocol(struct svsm_call *call)
+{
+        struct ghcb_state state;
+        unsigned long flags;
+        struct ghcb *ghcb;
+        int ret;
+
+        flags = native_local_irq_save();
+
+        if (sev_cfg.ghcbs_initialized)
+                ghcb = __sev_get_ghcb(&state);
+        else if (boot_ghcb)
+                ghcb = boot_ghcb;
+        else
+                ghcb = NULL;
+
+        do {
+                ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
+                           : __pi_svsm_perform_msr_protocol(call);
+        } while (ret == -EAGAIN);
+
+        if (sev_cfg.ghcbs_initialized)
+                __sev_put_ghcb(&state);
+
+        native_local_irq_restore(flags);
+
+        return ret;
+}
+
+static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
+                                        struct svsm_pvalidate_call *pc)
+{
+        struct svsm_pvalidate_entry *pe;
+
+        /* Nothing in the CA yet */
+        pc->num_entries = 0;
+        pc->cur_index = 0;
+
+        pe = &pc->entry[0];
+
+        while (pfn < pfn_end) {
+                pe->page_size = RMP_PG_SIZE_4K;
+                pe->action = action;
+                pe->ignore_cf = 0;
+                pe->rsvd = 0;
+                pe->pfn = pfn;
+
+                pe++;
+                pfn++;
+
+                pc->num_entries++;
+                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+                        break;
+        }
+
+        return pfn;
+}
+
+static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
+                                       struct svsm_pvalidate_call *pc)
+{
+        struct svsm_pvalidate_entry *pe;
+        struct psc_entry *e;
+
+        /* Nothing in the CA yet */
+        pc->num_entries = 0;
+        pc->cur_index = 0;
+
+        pe = &pc->entry[0];
+        e = &desc->entries[desc_entry];
+
+        while (desc_entry <= desc->hdr.end_entry) {
+                pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+                pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
+                pe->ignore_cf = 0;
+                pe->rsvd = 0;
+                pe->pfn = e->gfn;
+
+                pe++;
+                e++;
+
+                desc_entry++;
+                pc->num_entries++;
+                if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT)
+                        break;
+        }
+
+        return desc_entry;
+}
+
+static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
+{
+        unsigned int page_size;
+        bool action;
+        u64 pfn;
+
+        pfn = pc->entry[pc->cur_index].pfn;
+        action = pc->entry[pc->cur_index].action;
+        page_size = pc->entry[pc->cur_index].page_size;
+
+        __pval_terminate(pfn, action, page_size, ret, svsm_ret);
+}
+
+void svsm_pval_pages(struct snp_psc_desc *desc)
+{
+        struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY];
+        unsigned int i, pv_4k_count = 0;
+        struct svsm_pvalidate_call *pc;
+        struct svsm_call call = {};
+        unsigned long flags;
+        bool action;
+        u64 pc_pa;
+        int ret;
+
+        /*
+         * This can be called very early in the boot, use native functions in
+         * order to avoid paravirt issues.
+         */
+        flags = native_local_irq_save();
+
+        /*
+         * The SVSM calling area (CA) can support processing 510 entries at a
+         * time. Loop through the Page State Change descriptor until the CA is
+         * full or the last entry in the descriptor is reached, at which time
+         * the SVSM is invoked. This repeats until all entries in the descriptor
+         * are processed.
+         */
+        call.caa = svsm_get_caa();
+
+        pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
+        pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+        /* Protocol 0, Call ID 1 */
+        call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
+        call.rcx = pc_pa;
+
+        for (i = 0; i <= desc->hdr.end_entry;) {
+                i = svsm_build_ca_from_psc_desc(desc, i, pc);
+
+                do {
+                        ret = svsm_perform_call_protocol(&call);
+                        if (!ret)
+                                continue;
+
+                        /*
+                         * Check if the entry failed because of an RMP mismatch (a
+                         * PVALIDATE at 2M was requested, but the page is mapped in
+                         * the RMP as 4K).
+                         */
+
+                        if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+                            pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+                                /* Save this entry for post-processing at 4K */
+                                pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+                                /* Skip to the next one unless at the end of the list */
+                                pc->cur_index++;
+                                if (pc->cur_index < pc->num_entries)
+                                        ret = -EAGAIN;
+                                else
+                                        ret = 0;
+                        }
+                } while (ret == -EAGAIN);
+
+                if (ret)
+                        svsm_pval_terminate(pc, ret, call.rax_out);
+        }
+
+        /* Process any entries that failed to be validated at 2M and validate them at 4K */
+        for (i = 0; i < pv_4k_count; i++) {
+                u64 pfn, pfn_end;
+
+                action = pv_4k[i].action;
+                pfn = pv_4k[i].pfn;
+                pfn_end = pfn + 512;
+
+                while (pfn < pfn_end) {
+                        pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
+
+                        ret = svsm_perform_call_protocol(&call);
+                        if (ret)
+                                svsm_pval_terminate(pc, ret, call.rax_out);
+                }
+        }
+
+        native_local_irq_restore(flags);
+}
+
+static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
+{
+        /* If (new) lengths have been returned, propagate them up */
+        if (call->rcx_out != call->rcx)
+                input->manifest_buf.len = call->rcx_out;
+
+        if (call->rdx_out != call->rdx)
+                input->certificates_buf.len = call->rdx_out;
+
+        if (call->r8_out != call->r8)
+                input->report_buf.len = call->r8_out;
+}
+
+int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
+                              struct svsm_attest_call *input)
+{
+        struct svsm_attest_call *ac;
+        unsigned long flags;
+        u64 attest_call_pa;
+        int ret;
+
+        if (!snp_vmpl)
+                return -EINVAL;
+
+        local_irq_save(flags);
+
+        call->caa = svsm_get_caa();
+
+        ac = (struct svsm_attest_call *)call->caa->svsm_buffer;
+        attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
+
+        *ac = *input;
+
+        /*
+         * Set input registers for the request and set RDX and R8 to known
+         * values in order to detect length values being returned in them.
+         */
+        call->rax = call_id;
+        call->rcx = attest_call_pa;
+        call->rdx = -1;
+        call->r8 = -1;
+        ret = svsm_perform_call_protocol(call);
+        update_attest_input(call, input);
+
+        local_irq_restore(flags);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);
+
+/**
+ * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM
+ * @buffer: A buffer used to both send the command and receive the response.
+ *
+ * Execute a SVSM_VTPM_CMD call as defined by
+ * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00
+ *
+ * All command request/response buffers have a common structure as specified by
+ * the following table:
+ *     Byte      Size       In/Out    Description
+ *     Offset    (Bytes)
+ *     0x000     4          In        Platform command
+ *                          Out       Platform command response size
+ *
+ * Each command can build upon this common request/response structure to create
+ * a structure specific to the command. See include/linux/tpm_svsm.h for more
+ * details.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int snp_svsm_vtpm_send_command(u8 *buffer)
+{
+        struct svsm_call call = {};
+
+        call.caa = svsm_get_caa();
+        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD);
+        call.rcx = __pa(buffer);
+
+        return svsm_perform_call_protocol(&call);
+}
+EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command);
+
+/**
+ * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device
+ *
+ * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND
+ * which is the only request used so far.
+ *
+ * Return: true if the platform provides a vTPM SVSM device, false otherwise.
+ */
+bool snp_svsm_vtpm_probe(void)
+{
+        struct svsm_call call = {};
+
+        /* The vTPM device is available only if a SVSM is present */
+        if (!snp_vmpl)
+                return false;
+
+        call.caa = svsm_get_caa();
+        call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY);
+
+        if (svsm_perform_call_protocol(&call))
+                return false;
+
+        /* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */
+        return call.rcx_out & BIT_ULL(8);
+}
diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c
index f08c7505ed82a6..d98b5c08ef00d8 100644
--- a/arch/x86/coco/sev/vc-handle.c
+++ b/arch/x86/coco/sev/vc-handle.c
@@ -23,7 +23,6 @@
 #include <asm/init.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
-#include <asm/sev-internal.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/xcr.h>
 #include <asm/processor.h>
@@ -35,6 +34,8 @@
 #include <asm/apic.h>
 #include <asm/cpuid/api.h>
 
+#include "internal.h"
+
 static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
                                            unsigned long vaddr, phys_addr_t *paddr)
 {
@@ -403,7 +404,7 @@ static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool
         return ES_OK;
 }
 
-enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
+enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
 {
         struct pt_regs *regs = ctxt->regs;
         enum es_result ret;
@@ -447,7 +448,7 @@ enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt
 
 static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
-        return sev_es_ghcb_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
+        return __vc_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
 }
 
 static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index d7af4a64c211b7..3a24a3fc55f5b4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -67,7 +67,18 @@ const_cpu_current_top_of_stack = cpu_current_top_of_stack;
 #define ALIGN_ENTRY_TEXT_BEGIN  . = ALIGN(PMD_SIZE);
 #define ALIGN_ENTRY_TEXT_END    . = ALIGN(PMD_SIZE);
 
+#else
+
+#define X86_ALIGN_RODATA_BEGIN
+#define X86_ALIGN_RODATA_END            \
+                . = ALIGN(PAGE_SIZE);   \
+                __end_rodata_aligned = .;
+#define ALIGN_ENTRY_TEXT_BEGIN
+#define ALIGN_ENTRY_TEXT_END
+#endif
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * This section contains data which will be mapped as decrypted. Memory
  * encryption operates on a page basis. Make this section PMD-aligned
@@ -88,17 +99,9 @@ const_cpu_current_top_of_stack = cpu_current_top_of_stack;
         __pi___end_bss_decrypted = .;                           \
 
 #else
-
-#define X86_ALIGN_RODATA_BEGIN
-#define X86_ALIGN_RODATA_END            \
-                . = ALIGN(PAGE_SIZE);   \
-                __end_rodata_aligned = .;
-
-#define ALIGN_ENTRY_TEXT_BEGIN
-#define ALIGN_ENTRY_TEXT_END
 #define BSS_DECRYPTED
-
 #endif
+
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE)
 #define KEXEC_RELOCATE_KERNEL                                   \
         . = ALIGN(0x100);                                       \