[PATCH] x86/sev: Fix position dependent variable references in startup code
From: Borislav Petkov <bp@alien8.de>
Date: Tue, 6 Feb 2024 03:07:23 PST
On Sat, Feb 03, 2024 at 01:53:06PM +0100, Ard Biesheuvel wrote:
> arch/x86/coco/core.c | 7 +----
> arch/x86/include/asm/asm.h | 13 ++++++++++
> arch/x86/include/asm/coco.h | 8 +++++-
> arch/x86/include/asm/mem_encrypt.h | 13 ++++++----
> arch/x86/kernel/sev-shared.c | 12 ++++-----
> arch/x86/kernel/sev.c | 4 +--
> arch/x86/mm/mem_encrypt_identity.c | 27 +++++++++-----------
> 7 files changed, 49 insertions(+), 35 deletions(-)
Not bad - some touchups on top: call it "rip_rel_ref" everywhere, since
other code shortens "rIP-relative reference" the same way, and make the
asm wrapper __always_inline.
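For reference, a minimal sketch (hypothetical, not part of the patch) of
what the wrapper buys in a non-PIC build: the LEA with a %rip operand
forces the compiler to emit a relative reference, so the access is
correct even before relocations have been applied.
read_sev_status_early() is a made-up name for illustration:

	#include <asm/asm.h>		/* RIP_REL_REF() */

	extern u64 sev_status;

	static __always_inline u64 read_sev_status_early(void)
	{
		/*
		 * Expands to *(u64 *)rip_rel_ptr(&sev_status), i.e. a
		 * "leaq sev_status(%rip), %reg" plus a load - no absolute
		 * address that would be wrong this early during boot.
		 */
		return RIP_REL_REF(sev_status);
	}

In a __pic__ build the macro degenerates to a plain (var) access since
PIC codegen already emits relative references.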
Thx.
---
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 41b408b3dfb6..ca8eed1d496a 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -115,14 +115,15 @@
#ifndef __ASSEMBLY__
#ifndef __pic__
-static inline __pure void *rip_relative_ptr(void *p)
+static __always_inline __pure void *rip_rel_ptr(void *p)
{
asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
+
return p;
}
-#define RIP_RELATIVE_REF(var) (*(typeof(&(var)))rip_relative_ptr(&(var)))
+#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
#else
-#define RIP_RELATIVE_REF(var) (var)
+#define RIP_REL_REF(var) (var)
#endif
#endif
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index d6865e0f7587..21940ef8d290 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -17,7 +17,7 @@ extern u64 cc_mask;
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
static inline void cc_set_mask(u64 mask)
{
- RIP_RELATIVE_REF(cc_mask) = mask;
+ RIP_REL_REF(cc_mask) = mask;
}
u64 cc_mkenc(u64 val);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 6fa2ba58ed3f..b31eb9fd5954 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -61,7 +61,7 @@ void __init sev_es_init_vc_handling(void);
static inline u64 sme_get_me_mask(void)
{
- return RIP_RELATIVE_REF(sme_me_mask);
+ return RIP_REL_REF(sme_me_mask);
}
#define __bss_decrypted __section(".bss..decrypted")
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index a54711300d0b..a200bd72fadc 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -562,9 +562,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
/* Skip post-processing for out-of-range zero leafs. */
- if (!(leaf->fn <= RIP_RELATIVE_REF(cpuid_std_range_max) ||
- (leaf->fn >= 0x40000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_hyp_range_max)) ||
- (leaf->fn >= 0x80000000 && leaf->fn <= RIP_RELATIVE_REF(cpuid_ext_range_max))))
+ if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
+ (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
+ (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
return 0;
}
@@ -1074,11 +1074,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
if (fn->eax_in == 0x0)
- RIP_RELATIVE_REF(cpuid_std_range_max) = fn->eax;
+ RIP_REL_REF(cpuid_std_range_max) = fn->eax;
else if (fn->eax_in == 0x40000000)
- RIP_RELATIVE_REF(cpuid_hyp_range_max) = fn->eax;
+ RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
else if (fn->eax_in == 0x80000000)
- RIP_RELATIVE_REF(cpuid_ext_range_max) = fn->eax;
+ RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
}
}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 1cf348e19556..1ef7ae806a01 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -748,7 +748,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
@@ -767,7 +767,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
return;
/* Ask hypervisor to mark the memory pages shared in the RMP table. */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index d8d14133b654..0166ab1780cc 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -305,7 +305,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
* function.
*/
if (!sme_get_me_mask() ||
- RIP_RELATIVE_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
return;
/*
@@ -542,7 +542,7 @@ void __init sme_enable(struct boot_params *bp)
me_mask = 1UL << (ebx & 0x3f);
/* Check the SEV MSR whether SEV or SME is enabled */
- RIP_RELATIVE_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+ RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
@@ -595,7 +595,7 @@ void __init sme_enable(struct boot_params *bp)
return;
out:
- RIP_RELATIVE_REF(sme_me_mask) = me_mask;
+ RIP_REL_REF(sme_me_mask) = me_mask;
physical_mask &= ~me_mask;
cc_vendor = CC_VENDOR_AMD;
cc_set_mask(me_mask);
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette