[PATCH v11 3/3] x86/snp: Convert shared memory back to private on kexec

Borislav Petkov bp at alien8.de
Fri Jul 5 07:28:33 PDT 2024


On Tue, Jul 02, 2024 at 07:58:11PM +0000, Ashish Kalra wrote:
> +static void unshare_all_bss_decrypted_memory(void)
> +{
> +	unsigned long vaddr, vaddr_end;
> +	unsigned int level;
> +	unsigned int npages;
> +	pte_t *pte;
> +
> +	vaddr = (unsigned long)__start_bss_decrypted;
> +	vaddr_end = (unsigned long)__start_bss_decrypted_unused;
> +	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
> +	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
> +		pte = lookup_address(vaddr, &level);
> +		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
> +			continue;
> +
> +		set_pte_enc(pte, level, (void *)vaddr);
> +	}
> +	vaddr = (unsigned long)__start_bss_decrypted;
> +	snp_set_memory_private(vaddr, npages);
> +}

Merge the whole unsharing dance into a single function:

diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 5013c3afb0c4..f263ceada006 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -1049,58 +1049,49 @@ static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
 	return true;
 }
 
-static void unshare_all_bss_decrypted_memory(void)
-{
-	unsigned long vaddr, vaddr_end;
-	unsigned int level;
-	unsigned int npages;
-	pte_t *pte;
-
-	vaddr = (unsigned long)__start_bss_decrypted;
-	vaddr_end = (unsigned long)__start_bss_decrypted_unused;
-	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
-	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
-		pte = lookup_address(vaddr, &level);
-		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
-			continue;
-
-		set_pte_enc(pte, level, (void *)vaddr);
-	}
-	vaddr = (unsigned long)__start_bss_decrypted;
-	snp_set_memory_private(vaddr, npages);
-}
-
+/* Walk the direct mapping and convert all shared memory back to private. */
 static void unshare_all_memory(void)
 {
-	unsigned long addr, end;
-
-	/*
-	 * Walk direct mapping and convert all shared memory back to private.
-	 */
+	unsigned long addr, end, size;
+	unsigned int npages, level;
+	pte_t *pte;
 
+	/* Unshare the direct mapping. */
 	addr = PAGE_OFFSET;
 	end  = PAGE_OFFSET + get_max_mapped();
 
 	while (addr < end) {
-		unsigned long size;
-		unsigned int level;
-		pte_t *pte;
-
 		pte = lookup_address(addr, &level);
 		size = page_level_size(level);
 
-		if (pte && pte_decrypted(*pte) && !pte_none(*pte)) {
-			int pages = size / PAGE_SIZE;
-
-			if (!make_pte_private(pte, addr, pages, level)) {
-				pr_err("Failed to unshare range %#lx-%#lx\n",
-				       addr, addr + size);
-			}
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
+			addr += size;
+			continue;
 		}
-		addr += size;
+
+		npages = size / PAGE_SIZE;
+
+		if (!make_pte_private(pte, addr, npages, level))
+			pr_err("Failed to unshare range %#lx-%#lx\n",
+			       addr, addr + size);
+
+		addr += size;
 	}
 
-	unshare_all_bss_decrypted_memory();
+	/* Unshare all bss decrypted memory. */
+	addr = (unsigned long)__start_bss_decrypted;
+	end  = (unsigned long)__start_bss_decrypted_unused;
+	npages = (end - addr) >> PAGE_SHIFT;
+
+	for (; addr < end; addr += PAGE_SIZE) {
+		pte = lookup_address(addr, &level);
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
+			continue;
+
+		set_pte_enc(pte, level, (void *)addr);
+	}
+	addr = (unsigned long)__start_bss_decrypted;
+	snp_set_memory_private(addr, npages);
 
 	__flush_tlb_all();
 
@@ -1114,8 +1103,9 @@ void snp_kexec_begin(void)
 
 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
 		return;
+
 	/*
-	 * Crash kernel reaches here with interrupts disabled: can't wait for
+	 * Crash kernel ends up here with interrupts disabled: can't wait for
 	 * conversions to finish.
 	 *
 	 * If race happened, just report and proceed.
@@ -1124,7 +1114,6 @@ void snp_kexec_begin(void)
 		pr_warn("Failed to stop shared<->private conversions\n");
 }
 
-/* Walk direct mapping and convert all shared memory back to private */
 void snp_kexec_finish(void)
 {
 	struct sev_es_runtime_data *data;
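
IOW, after squashing the two, the merged function would end up looking something
like the below - just a sketch, with the direct-mapping loop also advancing addr
after a conversion attempt, and everything past the __flush_tlb_all() left as in
your patch:

/* Walk the direct mapping and convert all shared memory back to private. */
static void unshare_all_memory(void)
{
	unsigned long addr, end, size;
	unsigned int npages, level;
	pte_t *pte;

	/* Unshare the direct mapping. */
	addr = PAGE_OFFSET;
	end  = PAGE_OFFSET + get_max_mapped();

	while (addr < end) {
		pte = lookup_address(addr, &level);
		size = page_level_size(level);

		/* Skip memory holes and already-private mappings. */
		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
			addr += size;
			continue;
		}

		npages = size / PAGE_SIZE;

		if (!make_pte_private(pte, addr, npages, level))
			pr_err("Failed to unshare range %#lx-%#lx\n",
			       addr, addr + size);

		addr += size;
	}

	/* Unshare all bss decrypted memory. */
	addr = (unsigned long)__start_bss_decrypted;
	end  = (unsigned long)__start_bss_decrypted_unused;
	npages = (end - addr) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = lookup_address(addr, &level);
		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
			continue;

		set_pte_enc(pte, level, (void *)addr);
	}
	addr = (unsigned long)__start_bss_decrypted;
	snp_set_memory_private(addr, npages);

	__flush_tlb_all();

	/* ... rest of the function unchanged ... */
}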


-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette


