[PATCH v13 14/25] virt: geniezone: Optimize performance of protected VM memory
Liju-clr Chen
liju-clr.chen at mediatek.com
Thu Nov 14 02:07:51 PST 2024
From: Yingshiuan Pan <yingshiuan.pan at mediatek.com>
The memory protection mechanism performs better with batch operations on
memory pages. To leverage this, we pre-allocate memory for VMs that are
set to protected mode. The memory protection mechanism can then protect
the pre-allocated memory up front through batch operations, which
improves performance during VM booting.
Signed-off-by: Yingshiuan Pan <yingshiuan.pan at mediatek.com>
Co-developed-by: Jerry Wang <ze-yu.wang at mediatek.com>
Signed-off-by: Jerry Wang <ze-yu.wang at mediatek.com>
Signed-off-by: Yi-De Wu <yi-de.wu at mediatek.com>
Signed-off-by: Liju Chen <liju-clr.chen at mediatek.com>
---
arch/arm64/geniezone/vm.c | 127 ++++++++++++++++++++++++++
include/linux/soc/mediatek/gzvm_drv.h | 1 +
2 files changed, 128 insertions(+)
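
For reference, the batch-registration buffer handed to the hypervisor by the
new code below is assumed to be laid out roughly as sketched here. The sketch
is inferred from how the added code fills the fields (slot, constituent_cnt,
total_pages, gpa and the per-range address/pg_cnt pairs); the authoritative
definitions live in the GenieZone UAPI headers and the exact field widths may
differ.

	/* Sketch only; see the GenieZone UAPI headers for the real layout. */
	struct mem_region_addr_range {
		__u64 address;	/* start PA of one physically contiguous run */
		__u64 pg_cnt;	/* number of pages in that run */
	};

	struct gzvm_memory_region_ranges {
		__u32 slot;
		__u32 constituent_cnt;
		__u64 total_pages;
		__u64 gpa;
		struct mem_region_addr_range constituents[];
	};
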
diff --git a/arch/arm64/geniezone/vm.c b/arch/arm64/geniezone/vm.c
index 44cf1c5bfdb9..cc6c7e99851c 100644
--- a/arch/arm64/geniezone/vm.c
+++ b/arch/arm64/geniezone/vm.c
@@ -11,6 +11,8 @@
#include <linux/soc/mediatek/gzvm_drv.h>
#include "gzvm_arch_common.h"
+#define PAR_PA47_MASK GENMASK_ULL(47, 12)
+
/**
* gzvm_hypcall_wrapper() - the wrapper for hvc calls
* @a0: arguments passed in registers 0
@@ -210,6 +212,126 @@ static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
return 0;
}
+/**
+ * fill_constituents() - Populate physical addresses into the buffer until it is full
+ * @consti: Pointer to struct mem_region_addr_range.
+ * @consti_cnt: Constituent count.
+ * @max_nr_consti: Maximum number of constituents.
+ * @gfn: Guest frame number.
+ * @total_pages: Total number of pages.
+ * @slot: Pointer to struct gzvm_memslot.
+ *
+ * Return: the number of pages filled in, negative on error
+ */
+static int fill_constituents(struct mem_region_addr_range *consti,
+ int *consti_cnt, int max_nr_consti, u64 gfn,
+ u32 total_pages, struct gzvm_memslot *slot)
+{
+ u64 pfn = 0, prev_pfn = 0, gfn_end = 0;
+ int nr_pages = 0;
+ int i = -1;
+
+ if (unlikely(total_pages == 0))
+ return -EINVAL;
+ gfn_end = gfn + total_pages;
+
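+	/*
+	 * Merge physically contiguous pages into a single (address, pg_cnt)
+	 * constituent so the hypervisor can protect them in one batch
+	 * operation instead of page by page.
+	 */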
+	while (i < max_nr_consti && gfn < gfn_end) {
+		/*
+		 * Resolve this gfn to its backing pfn (helper assumed from the
+		 * memory pin/unpin support earlier in this series).
+		 */
+		if (gzvm_vm_allocate_guest_page(slot, gfn, &pfn) != 0)
+			return -EFAULT;
+		if (pfn == (prev_pfn + 1)) {
+ consti[i].pg_cnt++;
+ } else {
+ i++;
+ if (i >= max_nr_consti)
+ break;
+ consti[i].address = PFN_PHYS(pfn);
+ consti[i].pg_cnt = 1;
+ }
+ prev_pfn = pfn;
+ gfn++;
+ nr_pages++;
+ }
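+	/*
+	 * Turn 'i' from the last used index into the constituent count,
+	 * unless the loop already stepped past the end of the buffer.
+	 */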
+ if (i != max_nr_consti)
+ i++;
+ *consti_cnt = i;
+
+ return nr_pages;
+}
+
+/**
+ * gzvm_vm_populate_mem_region() - Populate the physical addresses of a memory
+ *                                 slot, one buffer-full at a time
+ * @gzvm: Pointer to struct gzvm.
+ * @slot_id: Memory slot id to be populated.
+ *
+ * Return: 0 on success, negative on error
+ */
+int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id)
+{
+ struct gzvm_memslot *memslot = &gzvm->memslot[slot_id];
+ struct gzvm_memory_region_ranges *region;
+ int max_nr_consti, remain_pages;
+ u64 gfn, gfn_end;
+ u32 buf_size;
+
+ buf_size = PAGE_SIZE * 2;
+ region = alloc_pages_exact(buf_size, GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ max_nr_consti = (buf_size - sizeof(*region)) /
+ sizeof(struct mem_region_addr_range);
+
+ region->slot = memslot->slot_id;
+ remain_pages = memslot->npages;
+ gfn = memslot->base_gfn;
+ gfn_end = gfn + remain_pages;
+
+ while (gfn < gfn_end) {
+ int nr_pages;
+
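+		/*
+		 * Pack as many contiguous ranges as fit into the two-page
+		 * buffer; a large slot is registered with the hypervisor in
+		 * several batches, one per loop iteration.
+		 */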
+ nr_pages = fill_constituents(region->constituents,
+					     &region->constituent_cnt,
+ max_nr_consti, gfn,
+ remain_pages, memslot);
+
+ if (nr_pages < 0) {
+ pr_err("Failed to fill constituents\n");
+ free_pages_exact(region, buf_size);
+ return -EFAULT;
+ }
+
+ region->gpa = PFN_PHYS(gfn);
+ region->total_pages = nr_pages;
+ remain_pages -= nr_pages;
+ gfn += nr_pages;
+
+ if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
+ virt_to_phys(region))) {
+ pr_err("Failed to register memregion to hypervisor\n");
+ free_pages_exact(region, buf_size);
+ return -EFAULT;
+ }
+ }
+ free_pages_exact(region, buf_size);
+
+ return 0;
+}
+
+static int populate_all_mem_regions(struct gzvm *gzvm)
+{
+ int ret, i;
+
+ for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
+ if (gzvm->memslot[i].npages == 0)
+ continue;
+
+ ret = gzvm_vm_populate_mem_region(gzvm, i);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* gzvm_vm_ioctl_cap_pvm() - Proceed GZVM_CAP_PROTECTED_VM's subcommands
* @gzvm: Pointer to struct gzvm.
@@ -231,6 +353,11 @@ static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
case GZVM_CAP_PVM_SET_PVMFW_GPA:
fallthrough;
case GZVM_CAP_PVM_SET_PROTECTED_VM:
+		/*
+		 * To improve performance of a protected VM, populate the VM's
+		 * memory before the VM boots.
+		 */
+ populate_all_mem_regions(gzvm);
ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
return ret;
case GZVM_CAP_PVM_GET_PVMFW_SIZE:
diff --git a/include/linux/soc/mediatek/gzvm_drv.h b/include/linux/soc/mediatek/gzvm_drv.h
index 835706586e93..07ab42357328 100644
--- a/include/linux/soc/mediatek/gzvm_drv.h
+++ b/include/linux/soc/mediatek/gzvm_drv.h
@@ -177,6 +177,7 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
u64 *hva_memslot);
+int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id);
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
--
2.18.0
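
With this change, enabling the protected-VM capability also pre-populates
guest memory. A minimal sketch of how a VMM might trigger it is shown below;
GZVM_ENABLE_CAP and struct gzvm_enable_cap are assumed from the UAPI
introduced earlier in this series, so the names and layout should be checked
against the actual headers.

	/* Sketch only; ioctl name and struct layout are assumptions. */
	#include <sys/ioctl.h>
	#include <linux/gzvm.h>

	static int enable_protected_vm(int vm_fd)
	{
		struct gzvm_enable_cap cap = {
			.cap = GZVM_CAP_PROTECTED_VM,
			.args = { GZVM_CAP_PVM_SET_PROTECTED_VM },
		};

		/* Populates and protects guest memory before the VM boots. */
		return ioctl(vm_fd, GZVM_ENABLE_CAP, &cap);
	}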