[PATCH 25/30] KVM: arm64: Implement the MEM_UNSHARE hypercall for protected VMs

Vincent Donnefort vdonnefort at google.com
Tue Jan 6 07:50:07 PST 2026


On Mon, Jan 05, 2026 at 03:49:33PM +0000, Will Deacon wrote:
> Implement the ARM_SMCCC_KVM_FUNC_MEM_UNSHARE hypercall to allow
> protected VMs to unshare memory that was previously shared with the host
> using the ARM_SMCCC_KVM_FUNC_MEM_SHARE hypercall.
> 
> Signed-off-by: Will Deacon <will at kernel.org>
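
Looks good to me. For context, the guest-visible ABI this implements is deliberately
minimal: x1 carries a page-aligned IPA, x2/x3 must be zero, and success comes back as
SMCCC_RET_SUCCESS. A rough sketch of how a guest caller might wrap it follows; note
that pkvm_guest_unshare_page() is a made-up name for illustration, not something from
this series:

	#include <linux/arm-smccc.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	/*
	 * Illustrative guest-side wrapper (not part of this patch). The IPA
	 * must be page-aligned and args 2/3 zero, matching the checks in
	 * kvm_handle_pvm_hvc64() below.
	 */
	static int pkvm_guest_unshare_page(phys_addr_t ipa)
	{
		struct arm_smccc_res res;

		if (!PAGE_ALIGNED(ipa))
			return -EINVAL;

		arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID,
				  ipa, 0, 0, &res);

		return res.a0 == SMCCC_RET_SUCCESS ? 0 : -EPERM;
	}

In practice the guest side would presumably sit behind the set_memory_encrypted()/
set_memory_decrypted() hooks rather than being open-coded like this, but the calling
convention is just the above.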


Reviewed-by: Vincent Donnefort <vdonnefort at google.com>


> ---
>  arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  1 +
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 32 +++++++++++++++++++
>  arch/arm64/kvm/hyp/nvhe/pkvm.c                | 22 +++++++++++++
>  3 files changed, 55 insertions(+)
> 
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> index 42fd60c5cfc9..e41a128b0854 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> @@ -36,6 +36,7 @@ extern unsigned long hyp_nr_cpus;
>  int __pkvm_prot_finalize(void);
>  int __pkvm_host_share_hyp(u64 pfn);
>  int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
> +int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
>  int __pkvm_host_unshare_hyp(u64 pfn);
>  int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
>  int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 365c769c82a4..c1600b88c316 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -920,6 +920,38 @@ int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn)
>  	return ret;
>  }
>  
> +int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn)
> +{
> +	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
> +	u64 phys, ipa = hyp_pfn_to_phys(gfn);
> +	kvm_pte_t pte;
> +	int ret;
> +
> +	host_lock_component();
> +	guest_lock_component(vm);
> +
> +	ret = get_valid_guest_pte(vm, ipa, &pte, &phys);
> +	if (ret)
> +		goto unlock;
> +
> +	ret = -EPERM;
> +	if (pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)) != PKVM_PAGE_SHARED_OWNED)
> +		goto unlock;
> +	if (__host_check_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_BORROWED))
> +		goto unlock;
> +
> +	ret = 0;
> +	WARN_ON(host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_GUEST));
> +	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
> +				       pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_OWNED),
> +				       &vcpu->vcpu.arch.pkvm_memcache, 0));
> +unlock:
> +	guest_unlock_component(vm);
> +	host_unlock_component();
> +
> +	return ret;
> +}
> +
>  int __pkvm_host_unshare_hyp(u64 pfn)
>  {
>  	u64 phys = hyp_pfn_to_phys(pfn);
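
The unshare transition reads as the exact inverse of __pkvm_guest_share_host(): both
the guest PTE state (PKVM_PAGE_SHARED_OWNED) and the host state
(PKVM_PAGE_SHARED_BORROWED) have to agree before ownership is handed back to the
guest and the page is remapped RWX as PKVM_PAGE_OWNED. For anyone less familiar with
the ownership protocol, here is the mental model I use, written as a stand-alone toy
(everything prefixed toy_/TOY_ is made up, and TOY_NOPAGE stands in for "no host
mapping, page owned by the guest"):

	/* Stand-alone toy model of the per-page state pairing; not kernel code. */
	#include <assert.h>
	#include <stdio.h>

	enum toy_state { TOY_OWNED, TOY_SHARED_OWNED, TOY_SHARED_BORROWED, TOY_NOPAGE };

	struct toy_page {
		enum toy_state guest;	/* tracked in the guest stage-2 PTE SW bits */
		enum toy_state host;	/* tracked in the host stage-2 */
	};

	/* MEM_SHARE: the guest stays the owner, the host becomes a borrower. */
	static int toy_share(struct toy_page *p)
	{
		if (p->guest != TOY_OWNED || p->host != TOY_NOPAGE)
			return -1;
		p->guest = TOY_SHARED_OWNED;
		p->host = TOY_SHARED_BORROWED;
		return 0;
	}

	/* MEM_UNSHARE: only legal when both sides agree the page is shared. */
	static int toy_unshare(struct toy_page *p)
	{
		if (p->guest != TOY_SHARED_OWNED || p->host != TOY_SHARED_BORROWED)
			return -1;		/* mirrors the -EPERM paths above */
		p->guest = TOY_OWNED;
		p->host = TOY_NOPAGE;		/* ownership handed back to the guest */
		return 0;
	}

	int main(void)
	{
		struct toy_page p = { .guest = TOY_OWNED, .host = TOY_NOPAGE };

		assert(toy_share(&p) == 0);
		assert(toy_unshare(&p) == 0);
		assert(toy_unshare(&p) < 0);	/* double-unshare is rejected */
		printf("share/unshare state machine OK\n");
		return 0;
	}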
> diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> index d8afa2b98542..2890328f4a78 100644
> --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
> +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> @@ -988,6 +988,19 @@ static bool pkvm_memshare_call(u64 *ret, struct kvm_vcpu *vcpu, u64 *exit_code)
>  	return false;
>  }
>  
> +static void pkvm_memunshare_call(u64 *ret, struct kvm_vcpu *vcpu)
> +{
> +	struct pkvm_hyp_vcpu *hyp_vcpu;
> +	u64 ipa = smccc_get_arg1(vcpu);
> +
> +	if (!PAGE_ALIGNED(ipa))
> +		return;
> +
> +	hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
> +	if (!__pkvm_guest_unshare_host(hyp_vcpu, hyp_phys_to_pfn(ipa)))
> +		ret[0] = SMCCC_RET_SUCCESS;
> +}
> +
>  /*
>   * Handler for protected VM HVC calls.
>   *
> @@ -1005,6 +1018,7 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
>  		val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
>  		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);
>  		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_SHARE);
> +		val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE);
>  		break;
>  	case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
>  		if (smccc_get_arg1(vcpu) ||
> @@ -1023,6 +1037,14 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
>  
>  		handled = pkvm_memshare_call(val, vcpu, exit_code);
>  		break;
> +	case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
> +		if (smccc_get_arg2(vcpu) ||
> +		    smccc_get_arg3(vcpu)) {
> +			break;
> +		}
> +
> +		pkvm_memunshare_call(val, vcpu);
> +		break;
>  	default:
>  		/* Punt everything else back to the host, for now. */
>  		handled = false;
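
On the pkvm.c side, the FEATURES bitmap now advertises the call, so a guest can use
the standard vendor-hyp discovery path before relying on it. Something along these
lines, where pkvm_mem_unshare_supported() is purely illustrative and
kvm_arm_hyp_service_available() is the existing helper that consumes the bitmap the
FEATURES case above returns:

	#include <linux/arm-smccc.h>

	/*
	 * Illustrative guest-side probe (not from this series): relies on the
	 * standard vendor-hyp feature discovery answered by the FEATURES case.
	 */
	static bool pkvm_mem_unshare_supported(void)
	{
		return kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE);
	}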
> -- 
> 2.52.0.351.gbe84eed79e-goog
> 


