[RFC PATCH v11 13/29] KVM: Add transparent hugepage support for dedicated guest memory
Sean Christopherson
seanjc at google.com
Tue Jul 18 16:44:56 PDT 2023
Signed-off-by: Sean Christopherson <seanjc at google.com>
---
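As implemented below, KVM_GUEST_MEMFD_ALLOW_HUGEPAGE opts a guest_memfd
instance in to PMD-sized (transparent hugepage) backing: when the flag is
set, kvm_gmem_get_folio() first tries to allocate and insert an
HPAGE_PMD_ORDER folio at the PMD-aligned index, and falls back to
filemap_grab_folio() if the range already contains a page or the large
allocation fails.  The flag is accepted only when
CONFIG_TRANSPARENT_HUGEPAGE is enabled, and the memfd size must then be
HPAGE_PMD_SIZE-aligned.
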
 include/uapi/linux/kvm.h |  2 ++
 virt/kvm/guest_mem.c     | 52 ++++++++++++++++++++++++++++++++++++----
 2 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9b344fc98598..17b12ee8b70e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -2290,6 +2290,8 @@ struct kvm_memory_attributes {
 
 #define KVM_CREATE_GUEST_MEMFD  _IOWR(KVMIO,  0xd4, struct kvm_create_guest_memfd)
 
+#define KVM_GUEST_MEMFD_ALLOW_HUGEPAGE          (1ULL << 0)
+
 struct kvm_create_guest_memfd {
         __u64 size;
         __u64 flags;

diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 1b705fd63fa8..384671a55b41 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -17,15 +17,48 @@ struct kvm_gmem {
         struct list_head entry;
 };
 
-static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
+static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index)
 {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        unsigned long huge_index = round_down(index, HPAGE_PMD_NR);
+        unsigned long flags = (unsigned long)inode->i_private;
+        struct address_space *mapping = inode->i_mapping;
+        gfp_t gfp = mapping_gfp_mask(mapping);
         struct folio *folio;
 
-        /* TODO: Support huge pages. */
-        folio = filemap_grab_folio(file->f_mapping, index);
+        if (!(flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE))
+                return NULL;
+
+        if (filemap_range_has_page(mapping, huge_index << PAGE_SHIFT,
+                                   (huge_index + HPAGE_PMD_NR - 1) << PAGE_SHIFT))
+                return NULL;
+
+        folio = filemap_alloc_folio(gfp, HPAGE_PMD_ORDER);
         if (!folio)
                 return NULL;
 
+        if (filemap_add_folio(mapping, folio, huge_index, gfp)) {
+                folio_put(folio);
+                return NULL;
+        }
+
+        return folio;
+#else
+        return NULL;
+#endif
+}
+
+static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+{
+        struct folio *folio;
+
+        folio = kvm_gmem_get_huge_folio(inode, index);
+        if (!folio) {
+                folio = filemap_grab_folio(inode->i_mapping, index);
+                if (!folio)
+                        return NULL;
+        }
+
         /*
          * Use the up-to-date flag to track whether or not the memory has been
          * zeroed before being handed off to the guest.  There is no backing
@@ -332,7 +365,8 @@ static const struct inode_operations kvm_gmem_iops = {
         .setattr = kvm_gmem_setattr,
 };
 
-static int __kvm_gmem_create(struct kvm *kvm, loff_t size, struct vfsmount *mnt)
+static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags,
+                             struct vfsmount *mnt)
 {
         const char *anon_name = "[kvm-gmem]";
         const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
@@ -355,6 +389,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, struct vfsmount *mnt)
         inode->i_mode |= S_IFREG;
         inode->i_size = size;
         mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+        mapping_set_large_folios(inode->i_mapping);
         mapping_set_unevictable(inode->i_mapping);
         mapping_set_unmovable(inode->i_mapping);
 
@@ -404,6 +439,12 @@ static bool kvm_gmem_is_valid_size(loff_t size, u64 flags)
         if (size < 0 || !PAGE_ALIGNED(size))
                 return false;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+        if ((flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE) &&
+            !IS_ALIGNED(size, HPAGE_PMD_SIZE))
+                return false;
+#endif
+
         return true;
 }
 
@@ -413,6 +454,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
         u64 flags = args->flags;
         u64 valid_flags = 0;
 
+        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+                valid_flags |= KVM_GUEST_MEMFD_ALLOW_HUGEPAGE;
+
         if (flags & ~valid_flags)
                 return -EINVAL;
 
--
2.41.0.255.g8b1d071c50-goog
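
For reference, a minimal userspace sketch of how the new flag might be
consumed via the KVM_CREATE_GUEST_MEMFD vm ioctl added earlier in this
series (create_hugepage_gmem is a hypothetical helper; assumes headers
from this series and a vm_fd obtained via KVM_CREATE_VM):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: create a guest_memfd with hugepage backing. */
    static int create_hugepage_gmem(int vm_fd, uint64_t size)
    {
            struct kvm_create_guest_memfd gmem = {
                    /*
                     * Must be HPAGE_PMD_SIZE-aligned, e.g. a multiple
                     * of 2MiB on x86-64 with 4KiB base pages.
                     */
                    .size  = size,
                    .flags = KVM_GUEST_MEMFD_ALLOW_HUGEPAGE,
            };

            /*
             * On success returns a new guest_memfd file descriptor; on
             * failure returns -1 with errno set (EINVAL if the kernel
             * lacks CONFIG_TRANSPARENT_HUGEPAGE or size is misaligned).
             */
            return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    }

The resulting fd is then bound to guest memory through the memslot uAPI
added earlier in the series.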