[PATCHv10 06/18] x86/mm: Make x86_platform.guest.enc_status_change_*() return errno

Michael Kelley mhklinux at outlook.com
Fri May 3 09:29:00 PDT 2024


From: Kirill A. Shutemov <kirill.shutemov at linux.intel.com> Sent: Tuesday, April 9, 2024 4:30 AM
> 
> TDX is going to have more than one reason to fail
> enc_status_change_prepare().
> 
> Change the callback to return errno instead of assuming -EIO;
> enc_status_change_finish() changed too to keep the interface symmetric.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov at linux.intel.com>
> Reviewed-by: Dave Hansen <dave.hansen at intel.com>
> Reviewed-by: Kai Huang <kai.huang at intel.com>
> Tested-by: Tao Liu <ltao at redhat.com>
> ---
>  arch/x86/coco/tdx/tdx.c         | 20 +++++++++++---------
>  arch/x86/hyperv/ivm.c           | 22 ++++++++++------------
>  arch/x86/include/asm/x86_init.h |  4 ++--
>  arch/x86/kernel/x86_init.c      |  4 ++--
>  arch/x86/mm/mem_encrypt_amd.c   |  8 ++++----
>  arch/x86/mm/pat/set_memory.c    |  8 +++++---
>  6 files changed, 34 insertions(+), 32 deletions(-)
> 
> diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
> index c1cb90369915..26fa47db5782 100644
> --- a/arch/x86/coco/tdx/tdx.c
> +++ b/arch/x86/coco/tdx/tdx.c
> @@ -798,28 +798,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
>  	return true;
>  }
> 
> -static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
> -					  bool enc)
> +static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
> +					 bool enc)
>  {
>  	/*
>  	 * Only handle shared->private conversion here.
>  	 * See the comment in tdx_early_init().
>  	 */
> -	if (enc)
> -		return tdx_enc_status_changed(vaddr, numpages, enc);
> -	return true;
> +	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
> +		return -EIO;
> +
> +	return 0;
>  }
> 
> -static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
> +static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
>  					 bool enc)
>  {
>  	/*
>  	 * Only handle private->shared conversion here.
>  	 * See the comment in tdx_early_init().
>  	 */
> -	if (!enc)
> -		return tdx_enc_status_changed(vaddr, numpages, enc);
> -	return true;
> +	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
> +		return -EIO;
> +
> +	return 0;
>  }
> 
>  void __init tdx_early_init(void)
> diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
> index 768d73de0d09..b4a851d27c7c 100644
> --- a/arch/x86/hyperv/ivm.c
> +++ b/arch/x86/hyperv/ivm.c
> @@ -523,9 +523,9 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
>   * transition is complete, hv_vtom_set_host_visibility() marks the pages
>   * as "present" again.
>   */
> -static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
> +static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
>  {
> -	return !set_memory_np(kbuffer, pagecount);
> +	return set_memory_np(kbuffer, pagecount);
>  }
> 
>  /*
> @@ -536,20 +536,19 @@ static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
>   * with host. This function works as wrap of hv_mark_gpa_visibility()
>   * with memory base and size.
>   */
> -static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
> +static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
>  {
>  	enum hv_mem_host_visibility visibility = enc ?
>  			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
>  	u64 *pfn_array;
>  	phys_addr_t paddr;
> +	int i, pfn, err;
>  	void *vaddr;
>  	int ret = 0;
> -	bool result = true;
> -	int i, pfn;
> 
>  	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
>  	if (!pfn_array) {
> -		result = false;
> +		ret = -ENOMEM;
>  		goto err_set_memory_p;
>  	}
> 
> @@ -568,10 +567,8 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
>  		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
>  			ret = hv_mark_gpa_visibility(pfn, pfn_array,
>  						     visibility);
> -			if (ret) {
> -				result = false;
> +			if (ret)
>  				goto err_free_pfn_array;
> -			}
>  			pfn = 0;
>  		}
>  	}
> @@ -586,10 +583,11 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
>  	 * order to avoid leaving the memory range in a "broken" state. Setting
>  	 * the PRESENT bits shouldn't fail, but return an error if it does.
>  	 */
> -	if (set_memory_p(kbuffer, pagecount))
> -		result = false;
> +	err = set_memory_p(kbuffer, pagecount);
> +	if (err && !ret)
> +		ret = err;
> 
> -	return result;
> +	return ret;
>  }
> 
>  static bool hv_vtom_tlb_flush_required(bool private)
> diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
> index 6149eabe200f..28ac3cb9b987 100644
> --- a/arch/x86/include/asm/x86_init.h
> +++ b/arch/x86/include/asm/x86_init.h
> @@ -151,8 +151,8 @@ struct x86_init_acpi {
>   * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
>   */
>  struct x86_guest {
> -	bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
> -	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
> +	int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
> +	int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
>  	bool (*enc_tlb_flush_required)(bool enc);
>  	bool (*enc_cache_flush_required)(void);
>  };
> diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
> index d5dc5a92635a..a7143bb7dd93 100644
> --- a/arch/x86/kernel/x86_init.c
> +++ b/arch/x86/kernel/x86_init.c
> @@ -134,8 +134,8 @@ struct x86_cpuinit_ops x86_cpuinit = {
> 
>  static void default_nmi_init(void) { };
> 
> -static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
> -static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
> +static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
> +static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
>  static bool enc_tlb_flush_required_noop(bool enc) { return false; }
>  static bool enc_cache_flush_required_noop(void) { return false; }
>  static bool is_private_mmio_noop(u64 addr) {return false; }
> diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
> index 422602f6039b..e7b67519ddb5 100644
> --- a/arch/x86/mm/mem_encrypt_amd.c
> +++ b/arch/x86/mm/mem_encrypt_amd.c
> @@ -283,7 +283,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
>  #endif
>  }
> 
> -static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
> +static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
>  {
>  	/*
>  	 * To maintain the security guarantees of SEV-SNP guests, make sure
> @@ -292,11 +292,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
>  	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
>  		snp_set_memory_shared(vaddr, npages);
> 
> -	return true;
> +	return 0;
>  }
> 
>  /* Return 0 unconditionally: return value doesn't matter for the SEV side */
> -static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
> +static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
>  {
>  	/*
>  	 * After memory is mapped encrypted in the page table, validate it
> @@ -308,7 +308,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
>  	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
>  		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
> 
> -	return true;
> +	return 0;
>  }
> 
>  static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 80c9037ffadf..e5b454036bf3 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -2156,7 +2156,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
>  		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
> 
>  	/* Notify hypervisor that we are about to set/clr encryption attribute. */
> -	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
> +	ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
> +	if (ret)
>  		goto vmm_fail;
> 
>  	ret = __change_page_attr_set_clr(&cpa, 1);
> @@ -2174,7 +2175,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
>  		return ret;
> 
>  	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
> -	if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
> +	ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
> +	if (ret)
>  		goto vmm_fail;
> 
>  	return 0;
> @@ -2183,7 +2185,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
>  	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
>  		  (void *)addr, numpages, enc ? "private" : "shared");

Nit: Now that there's an error code instead of just a boolean, it would be nice
to have this warning message include the error code.  Some callers of
set_memory_decrypted()/set_memory_encrypted() already output their own failure
message that includes the error code, in which case this message would be
redundant.  But many callers do not, so including the code here would help.

> 
> -	return -EIO;
> +	return ret;
>  }
> 
>  static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
> --
> 2.43.0
> 

My nit notwithstanding,

Reviewed-by: Michael Kelley <mhklinux at outlook.com>



More information about the kexec mailing list