[RFC PATCH 12/45] KVM: arm64: pkvm: Unify pkvm_teardown_donated_memory()
Sebastian Ene
sebastianene at google.com
Mon Jan 15 06:33:50 PST 2024
Hi Jean,

On Wed, Feb 01, 2023 at 12:52:56PM +0000, Jean-Philippe Brucker wrote:
> Tearing down donated memory requires clearing the memory, pushing the
> pages into the reclaim memcache, and moving the mapping into the host
> stage-2. Keep these operations in a single function.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
> ---
> arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +-
> arch/arm64/kvm/hyp/nvhe/pkvm.c | 50 +++++++------------
> 3 files changed, 22 insertions(+), 33 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> index d4f4ffbb7dbb..021825aee854 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> @@ -86,6 +86,8 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
>
> void *pkvm_map_donated_memory(unsigned long host_va, size_t size);
> void pkvm_unmap_donated_memory(void *va, size_t size);
> +void pkvm_teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr,
> + size_t dirty_size);
>
> static __always_inline void __load_host_stage2(void)
> {
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 410361f41e38..cad5736026d5 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -314,8 +314,7 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
> addr = hyp_alloc_pages(&vm->pool, 0);
> while (addr) {
> memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
> - push_hyp_memcache(mc, addr, hyp_virt_to_phys);
> - WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
> + pkvm_teardown_donated_memory(mc, addr, 0);
> addr = hyp_alloc_pages(&vm->pool, 0);
> }
> }
> diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> index a3711979bbd3..c51a8a592849 100644
> --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
> +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> @@ -602,27 +602,28 @@ void *pkvm_map_donated_memory(unsigned long host_va, size_t size)
> return va;
> }
>
> -static void __unmap_donated_memory(void *va, size_t size)
> +void pkvm_teardown_donated_memory(struct kvm_hyp_memcache *mc, void *va,
> + size_t dirty_size)
> {
> - WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
> - PAGE_ALIGN(size) >> PAGE_SHIFT));
> -}
> + size_t size = max(PAGE_ALIGN(dirty_size), PAGE_SIZE);
>
> -void pkvm_unmap_donated_memory(void *va, size_t size)
> -{
> if (!va)
> return;
>
> - memset(va, 0, size);
> - __unmap_donated_memory(va, size);
> + memset(va, 0, dirty_size);
> +
> + if (mc) {
> + for (void *start = va; start < va + size; start += PAGE_SIZE)
> + push_hyp_memcache(mc, start, hyp_virt_to_phys);
> + }
> +
> + WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
> + size >> PAGE_SHIFT));
> }
>
> -static void unmap_donated_memory_noclear(void *va, size_t size)
> +void pkvm_unmap_donated_memory(void *va, size_t size)
> {
> - if (!va)
> - return;
> -
> - __unmap_donated_memory(va, size);
> + pkvm_teardown_donated_memory(NULL, va, size);
> }
>
> /*
> @@ -759,18 +760,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
> return ret;
> }
>
> -static void
> -teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
> -{
> - size = PAGE_ALIGN(size);
> - memset(addr, 0, size);
> -
> - for (void *start = addr; start < addr + size; start += PAGE_SIZE)
> - push_hyp_memcache(mc, start, hyp_virt_to_phys);
> -
> - unmap_donated_memory_noclear(addr, size);
> -}
> -
> int __pkvm_teardown_vm(pkvm_handle_t handle)
> {
> size_t vm_size, last_ran_size;
> @@ -813,19 +802,18 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
> vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
> while (vcpu_mc->nr_pages) {
> addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
> - push_hyp_memcache(mc, addr, hyp_virt_to_phys);
> - unmap_donated_memory_noclear(addr, PAGE_SIZE);
> + pkvm_teardown_donated_memory(mc, addr, 0);
Here we probably need to pass PAGE_SIZE instead of 0 for the dirty_size
argument, to make sure the contents of the page are cleared before it is
torn down.
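
Something along these lines (untested, just to illustrate the idea):

	vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
	while (vcpu_mc->nr_pages) {
		addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
		/* PAGE_SIZE instead of 0, so the page gets zeroed */
		pkvm_teardown_donated_memory(mc, addr, PAGE_SIZE);
	}

so that the memset() in pkvm_teardown_donated_memory() covers the whole
page before it is pushed into the reclaim memcache and donated back to
the host.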
> }
>
> - teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
> + pkvm_teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
> }
>
> last_ran_size = pkvm_get_last_ran_size();
> - teardown_donated_memory(mc, hyp_vm->kvm.arch.mmu.last_vcpu_ran,
> - last_ran_size);
> + pkvm_teardown_donated_memory(mc, hyp_vm->kvm.arch.mmu.last_vcpu_ran,
> + last_ran_size);
>
> vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
> - teardown_donated_memory(mc, hyp_vm, vm_size);
> + pkvm_teardown_donated_memory(mc, hyp_vm, vm_size);
> hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
> return 0;
>
> --
> 2.39.0
>
Thanks,
Seb