[PATCH v10 12/26] gunyah: vm_mgr: Add/remove user memory regions
Elliot Berman
quic_eberman at quicinc.com
Fri Feb 24 17:03:10 PST 2023
On 2/23/2023 4:34 PM, Alex Elder wrote:
> On 2/14/23 3:24 PM, Elliot Berman wrote:
>>
>> When launching a virtual machine, Gunyah userspace allocates memory for
>> the guest and informs Gunyah about these memory regions through
>> SET_USER_MEMORY_REGION ioctl.
>>
>> Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu at quicinc.com>
>> Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu at quicinc.com>
>> Signed-off-by: Elliot Berman <quic_eberman at quicinc.com>
>> ---
>> drivers/virt/gunyah/Makefile | 2 +-
>> drivers/virt/gunyah/vm_mgr.c | 44 ++++++
>> drivers/virt/gunyah/vm_mgr.h | 25 ++++
>> drivers/virt/gunyah/vm_mgr_mm.c | 235 ++++++++++++++++++++++++++++++++
>> include/uapi/linux/gunyah.h | 33 +++++
>> 5 files changed, 338 insertions(+), 1 deletion(-)
>> create mode 100644 drivers/virt/gunyah/vm_mgr_mm.c
>>
>> diff --git a/drivers/virt/gunyah/Makefile b/drivers/virt/gunyah/Makefile
>> index 03951cf82023..ff8bc4925392 100644
>> --- a/drivers/virt/gunyah/Makefile
>> +++ b/drivers/virt/gunyah/Makefile
>> @@ -2,5 +2,5 @@
>> obj-$(CONFIG_GUNYAH) += gunyah.o
>> -gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o
>> +gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o
>> obj-$(CONFIG_GUNYAH) += gunyah_rsc_mgr.o
>> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
>> index fd890a57172e..84102bac03cc 100644
>> --- a/drivers/virt/gunyah/vm_mgr.c
>> +++ b/drivers/virt/gunyah/vm_mgr.c
>> @@ -18,8 +18,16 @@
>> static void gh_vm_free(struct work_struct *work)
>> {
>> struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
>> + struct gh_vm_mem *mapping, *tmp;
>> int ret;
>> + mutex_lock(&ghvm->mm_lock);
>> + list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
>> + gh_vm_mem_reclaim(ghvm, mapping);
>> + kfree(mapping);
>> + }
>> + mutex_unlock(&ghvm->mm_lock);
>> +
>> ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
>> if (ret)
>> pr_warn("Failed to deallocate vmid: %d\n", ret);
>> @@ -48,11 +56,46 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
>> ghvm->vmid = vmid;
>> ghvm->rm = rm;
>> + mutex_init(&ghvm->mm_lock);
>> + INIT_LIST_HEAD(&ghvm->memory_mappings);
>> INIT_WORK(&ghvm->free_work, gh_vm_free);
>> return ghvm;
>> }
>> +static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
>> +{
>> + struct gh_vm *ghvm = filp->private_data;
>> + void __user *argp = (void __user *)arg;
>> + long r;
>> +
>> + switch (cmd) {
>> + case GH_VM_SET_USER_MEM_REGION: {
>> + struct gh_userspace_memory_region region;
>> +
>> + if (copy_from_user(&region, argp, sizeof(region)))
>> + return -EFAULT;
>> +
>> + /* All other flag bits are reserved for future use */
>> + if (region.flags & ~(GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC |
>> + GH_MEM_LENT))
>> + return -EINVAL;
>> +
>> +
>> + if (region.memory_size)
>
> Would there be any value in allowing a zero-size memory
> region to be created? Maybe that doesn't make sense, but
> I guess I'm questioning whether giving a zero memory region
> size special meaning in this interface is a good thing to
> do. You could sensibly have a separate REMOVE_USER_MEM_REGION
> request, and still permit 0 to be a valid size.
>
I don't think a zero-size memory region makes sense. At best, it only
registers an empty region with the guest and adds memory overhead for
bookkeeping.
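
For reference, the intended flow from userspace looks roughly like
this (a sketch only: add_guest_memory(), the label, the guest address,
and the size are all arbitrary illustrations, and error handling is
trimmed). A nonzero memory_size takes the gh_vm_mem_alloc() path:

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/gunyah.h>

    /* Sketch: register anonymous host memory as a guest memory parcel. */
    static int add_guest_memory(int vm_fd)
    {
            const size_t size = 0x1000000; /* 16 MiB, page-aligned */
            struct gh_userspace_memory_region region = { 0 };
            void *mem;

            mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (mem == MAP_FAILED)
                    return -1;

            region.label = 1; /* any unique value userspace picks */
            region.flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE;
            region.guest_phys_addr = 0x80000000;
            region.memory_size = size;
            region.userspace_addr = (__u64)(unsigned long)mem;

            return ioctl(vm_fd, GH_VM_SET_USER_MEM_REGION, &region);
    }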
>> + r = gh_vm_mem_alloc(ghvm, &region);
>> + else
>> + r = gh_vm_mem_free(ghvm, region.label);
>> + break;
>> + }
>> + default:
>> + r = -ENOTTY;
>> + break;
>> + }
>> +
>> + return r;
>> +}
>> +
>> static int gh_vm_release(struct inode *inode, struct file *filp)
>> {
>> struct gh_vm *ghvm = filp->private_data;
>> @@ -65,6 +108,7 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
>> }
>> static const struct file_operations gh_vm_fops = {
>> + .unlocked_ioctl = gh_vm_ioctl,
>> .release = gh_vm_release,
>> .compat_ioctl = compat_ptr_ioctl,
>> .llseek = noop_llseek,
>> diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
>> index 76954da706e9..97bc00c34878 100644
>> --- a/drivers/virt/gunyah/vm_mgr.h
>> +++ b/drivers/virt/gunyah/vm_mgr.h
>> @@ -7,16 +7,41 @@
>> #define _GH_PRIV_VM_MGR_H
>> #include <linux/gunyah_rsc_mgr.h>
>> +#include <linux/list.h>
>> +#include <linux/miscdevice.h>
>> +#include <linux/mutex.h>
>> #include <uapi/linux/gunyah.h>
>> long gh_dev_vm_mgr_ioctl(struct gh_rm *rm, unsigned int cmd, unsigned long arg);
>> +enum gh_vm_mem_share_type {
>> + VM_MEM_SHARE,
>> + VM_MEM_LEND,
>
> Are there any other share types anticipated? Even if
> there were, for now you could use a Boolean to distinguish
> between shared or lent (at least until a third option
> materializes).
>
There is VM_MEM_DONATE. I can add the type, but it's only used by
special VMs (nothing really stops a generic unauthenticated VM from
using it, but I don't think anyone will want to).
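
In other words, the enum could grow like this (sketch only; nothing in
this series wires up VM_MEM_DONATE):

    enum gh_vm_mem_share_type {
            VM_MEM_SHARE,   /* host and guest both retain access */
            VM_MEM_LEND,    /* guest gets exclusive access */
            VM_MEM_DONATE,  /* ownership moves to the guest; special VMs */
    };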
>> +};
>> +
>> +struct gh_vm_mem {
>> + struct list_head list;
>> + enum gh_vm_mem_share_type share_type;
>> + struct gh_rm_mem_parcel parcel;
>> +
>> + __u64 guest_phys_addr;
>> + struct page **pages;
>> + unsigned long npages;
>> +};
>> +
>> struct gh_vm {
>> u16 vmid;
>> struct gh_rm *rm;
>> struct work_struct free_work;
>> + struct mutex mm_lock;
>> + struct list_head memory_mappings;
>> };
>> +int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
>> +void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping);
>> +int gh_vm_mem_free(struct gh_vm *ghvm, u32 label);
>> +struct gh_vm_mem *gh_vm_mem_find(struct gh_vm *ghvm, u32 label);
>> +
>> #endif
>> diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c
>> new file mode 100644
>> index 000000000000..03e71a36ea3b
>> --- /dev/null
>> +++ b/drivers/virt/gunyah/vm_mgr_mm.c
>> @@ -0,0 +1,235 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
>> + */
>> +
>> +#define pr_fmt(fmt) "gh_vm_mgr: " fmt
>> +
>> +#include <linux/gunyah_rsc_mgr.h>
>> +#include <linux/mm.h>
>> +
>> +#include <uapi/linux/gunyah.h>
>> +
>> +#include "vm_mgr.h"
>> +
>> +static inline bool page_contiguous(phys_addr_t p, phys_addr_t t)
>
> Is there not some existing function that captures this?
> In any case, it's used in one place and I think it would
> be clearer to just put the logic there rather than hiding
> it behind this function.
>
Done.
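At the single call site, the open-coded test ends up looking roughly
like this (sketch; count_parcel_entries() is just an illustrative name
for the loop that sizes the parcel's memory entries, and the exact v11
code may differ):

    /* Count how many physically-contiguous runs the pages form;
     * each run becomes one memory entry in the parcel.
     */
    static unsigned long count_parcel_entries(struct page **pages,
                                              unsigned long npages)
    {
            unsigned long i, entries = npages ? 1 : 0;

            for (i = 1; i < npages; i++) {
                    /* a discontiguity starts a new entry */
                    if (page_to_phys(pages[i]) -
                        page_to_phys(pages[i - 1]) != PAGE_SIZE)
                            entries++;
            }

            return entries;
    }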
>> +{
>> + return t - p == PAGE_SIZE;
>> +}
>> +
>> +static struct gh_vm_mem *__gh_vm_mem_find(struct gh_vm *ghvm, u32 label)
>> + __must_hold(&ghvm->mm_lock)
>> +{
>> + struct gh_vm_mem *mapping;
>> +
>> + list_for_each_entry(mapping, &ghvm->memory_mappings, list)
>> + if (mapping->parcel.label == label)
>> + return mapping;
>> +
>> + return NULL;
>> +}
>> +
>
> . . .
>
>> diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
>> index 10ba32d2b0a6..d85d12119a48 100644
>> --- a/include/uapi/linux/gunyah.h
>> +++ b/include/uapi/linux/gunyah.h
>> @@ -20,4 +20,37 @@
>> */
>> #define GH_CREATE_VM _IO(GH_IOCTL_TYPE, 0x0) /* Returns a Gunyah VM fd */
>> +/*
>> + * ioctls for VM fds
>> + */
>> +
>> +/**
>> + * struct gh_userspace_memory_region - Userspace memory description for GH_VM_SET_USER_MEM_REGION
>> + * @label: Unique identifier for the region.
>
> Maybe this is described somewhere, but what is the purpose
> of the label? Who uses it? Is it meant to be a value
> only the current owner of a resource understands? Or does
> resource manager use it internally, or what?
>
The label is used by the kernel, userspace, and Gunyah. Userspace
decides all the labels, and there are no special labels.
- Userspace can delete memory parcels by label (the kernel looks up the
parcel by label); see the sketch below.
- The VM's DTB configuration describes where Gunyah should map memory
parcels into the guest's memory. The VM DTB uses the memory parcel's
label as the reference.
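
For example, deleting a parcel from userspace is the same ioctl with a
zero size (sketch; remove_guest_memory() is an illustrative name):

    #include <sys/ioctl.h>
    #include <linux/gunyah.h>

    /* Sketch: a zero memory_size takes the gh_vm_mem_free() path. */
    static int remove_guest_memory(int vm_fd, __u32 label)
    {
            struct gh_userspace_memory_region region = {
                    .label = label,   /* label used when adding */
                    .memory_size = 0, /* zero size means "remove" */
            };

            return ioctl(vm_fd, GH_VM_SET_USER_MEM_REGION, &region);
    }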
Thanks,
Elliot
>> + * @flags: Flags for memory parcel behavior
>> + * @guest_phys_addr: Location of the memory region in guest's memory space (page-aligned)
>> + * @memory_size: Size of the region (page-aligned)
>> + * @userspace_addr: Location of the memory region in caller (userspace)'s memory
>> + *
>> + * See Documentation/virt/gunyah/vm-manager.rst for further details.
>> + */
>> +struct gh_userspace_memory_region {
>> + __u32 label;
>
> Define the possible permission values separate from
> the structure.
>
> -Alex
>
>> +#define GH_MEM_ALLOW_READ (1UL << 0)
>> +#define GH_MEM_ALLOW_WRITE (1UL << 1)
>> +#define GH_MEM_ALLOW_EXEC (1UL << 2)
>> +/*
>> + * The guest will be lent the memory instead of shared.
>> + * In other words, the guest has exclusive access to the memory region and the host loses access.
>> + */
>> +#define GH_MEM_LENT (1UL << 3)
>> + __u32 flags;
>> + __u64 guest_phys_addr;
>> + __u64 memory_size;
>> + __u64 userspace_addr;
>> +};
>> +
>> +#define GH_VM_SET_USER_MEM_REGION _IOW(GH_IOCTL_TYPE, 0x1, \
>> + struct gh_userspace_memory_region)
>> +
>> #endif
>