[PATCH v2 4/4] KVM: arm64: Use config_lock to protect vgic state

Marc Zyngier maz at kernel.org
Wed Mar 22 05:02:15 PDT 2023


On Thu, 16 Mar 2023 21:14:12 +0000,
Oliver Upton <oliver.upton at linux.dev> wrote:
> 
> Almost all of the vgic state is VM-scoped but accessed from the context
> of a vCPU. These accesses were serialized on the kvm->lock which cannot
> be nested within a vcpu->mutex critical section.
> 
> Move over the vgic state to using the config_lock. Tweak the lock
> ordering where necessary to ensure that the config_lock is acquired
> after the vcpu->mutex. Acquire the config_lock in kvm_vgic_create() to
> avoid a race between the converted flows and GIC creation.
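
Just so we're on the same page, my reading of the ordering this series
ends up establishing (it isn't spelled out in this patch itself, so
correct me if I have it wrong) is roughly:

  kvm->lock
    vcpu->mutex
      kvm->arch.config_lock

i.e. the config_lock sits innermost of the three.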
> 
> Signed-off-by: Oliver Upton <oliver.upton at linux.dev>
> ---
>  arch/arm64/kvm/vgic/vgic-debug.c      |  8 ++--
>  arch/arm64/kvm/vgic/vgic-init.c       | 33 ++++++++++-------
>  arch/arm64/kvm/vgic/vgic-its.c        | 29 ++++++---------
>  arch/arm64/kvm/vgic/vgic-kvm-device.c | 53 ++++++++++++---------------
>  arch/arm64/kvm/vgic/vgic-mmio-v3.c    |  4 +-
>  arch/arm64/kvm/vgic/vgic-mmio.c       | 12 +++---
>  arch/arm64/kvm/vgic/vgic-v4.c         | 11 +++---
>  arch/arm64/kvm/vgic/vgic.c            |  2 +-
>  8 files changed, 75 insertions(+), 77 deletions(-)
> 
> diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
> index 78cde687383c..07aa0437125a 100644
> --- a/arch/arm64/kvm/vgic/vgic-debug.c
> +++ b/arch/arm64/kvm/vgic/vgic-debug.c
> @@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
>  	struct kvm *kvm = s->private;
>  	struct vgic_state_iter *iter;
>  
> -	mutex_lock(&kvm->lock);
> +	mutex_lock(&kvm->arch.config_lock);
>  	iter = kvm->arch.vgic.iter;
>  	if (iter) {
>  		iter = ERR_PTR(-EBUSY);
> @@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
>  	if (end_of_vgic(iter))
>  		iter = NULL;
>  out:
> -	mutex_unlock(&kvm->lock);
> +	mutex_unlock(&kvm->arch.config_lock);
>  	return iter;
>  }
>  
> @@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
>  	if (IS_ERR(v))
>  		return;
>  
> -	mutex_lock(&kvm->lock);
> +	mutex_lock(&kvm->arch.config_lock);
>  	iter = kvm->arch.vgic.iter;
>  	kfree(iter->lpi_array);
>  	kfree(iter);
>  	kvm->arch.vgic.iter = NULL;
> -	mutex_unlock(&kvm->lock);
> +	mutex_unlock(&kvm->arch.config_lock);
>  }
>  
>  static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
> diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
> index cd134db41a57..b1690063e17d 100644
> --- a/arch/arm64/kvm/vgic/vgic-init.c
> +++ b/arch/arm64/kvm/vgic/vgic-init.c
> @@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
>  	unsigned long i;
>  	int ret;
>  
> -	if (irqchip_in_kernel(kvm))
> -		return -EEXIST;
> -
>  	/*
>  	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
>  	 * which had no chance yet to check the availability of the GICv2
> @@ -91,6 +88,13 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
>  	if (!lock_all_vcpus(kvm))
>  		return ret;
>  
> +	mutex_lock(&kvm->arch.config_lock);
> +
> +	if (irqchip_in_kernel(kvm)) {
> +		ret = -EEXIST;
> +		goto out_unlock;
> +	}
> +
>  	kvm_for_each_vcpu(i, vcpu, kvm) {
>  		if (vcpu_has_run_once(vcpu))
>  			goto out_unlock;
> @@ -118,6 +122,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
>  		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
>  
>  out_unlock:
> +	mutex_unlock(&kvm->arch.config_lock);
>  	unlock_all_vcpus(kvm);
>  	return ret;
>  }
> @@ -227,9 +232,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>  	 * KVM io device for the redistributor that belongs to this VCPU.
>  	 */
>  	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
> -		mutex_lock(&vcpu->kvm->lock);
> +		mutex_lock(&vcpu->kvm->arch.config_lock);
>  		ret = vgic_register_redist_iodev(vcpu);
> -		mutex_unlock(&vcpu->kvm->lock);
> +		mutex_unlock(&vcpu->kvm->arch.config_lock);
>  	}
>  	return ret;
>  }
> @@ -250,7 +255,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
>   * The function is generally called when nr_spis has been explicitly set
>   * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
>   * vgic_initialized() returns true when this function has succeeded.
> - * Must be called with kvm->lock held!
>   */
>  int vgic_init(struct kvm *kvm)
>  {
> @@ -259,6 +263,8 @@ int vgic_init(struct kvm *kvm)
>  	int ret = 0, i;
>  	unsigned long idx;
>  
> +	lockdep_assert_held(&kvm->arch.config_lock);
> +
>  	if (vgic_initialized(kvm))
>  		return 0;
>  
> @@ -373,12 +379,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
>  	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
>  }
>  
> -/* To be called with kvm->lock held */
>  static void __kvm_vgic_destroy(struct kvm *kvm)
>  {
>  	struct kvm_vcpu *vcpu;
>  	unsigned long i;
>  
> +	lockdep_assert_held(&kvm->arch.config_lock);
> +
>  	vgic_debug_destroy(kvm);
>  
>  	kvm_for_each_vcpu(i, vcpu, kvm)
> @@ -389,9 +396,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
>  
>  void kvm_vgic_destroy(struct kvm *kvm)
>  {
> -	mutex_lock(&kvm->lock);
> +	mutex_lock(&kvm->arch.config_lock);
>  	__kvm_vgic_destroy(kvm);
> -	mutex_unlock(&kvm->lock);
> +	mutex_unlock(&kvm->arch.config_lock);
>  }
>  
>  /**
> @@ -414,9 +421,9 @@ int vgic_lazy_init(struct kvm *kvm)
>  		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
>  			return -EBUSY;
>  
> -		mutex_lock(&kvm->lock);
> +		mutex_lock(&kvm->arch.config_lock);
>  		ret = vgic_init(kvm);
> -		mutex_unlock(&kvm->lock);
> +		mutex_unlock(&kvm->arch.config_lock);
>  	}
>  
>  	return ret;
> @@ -441,7 +448,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
>  	if (likely(vgic_ready(kvm)))
>  		return 0;
>  
> -	mutex_lock(&kvm->lock);
> +	mutex_lock(&kvm->arch.config_lock);
>  	if (vgic_ready(kvm))
>  		goto out;
>  
> @@ -459,7 +466,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
>  		dist->ready = true;
>  
>  out:
> -	mutex_unlock(&kvm->lock);
> +	mutex_unlock(&kvm->arch.config_lock);
>  	return ret;
>  }
>  
> diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
> index 2642e9ce2819..ca55065102e7 100644
> --- a/arch/arm64/kvm/vgic/vgic-its.c
> +++ b/arch/arm64/kvm/vgic/vgic-its.c
> @@ -2043,7 +2043,10 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
>  	if (offset & align)
>  		return -EINVAL;
>  
> -	mutex_lock(&dev->kvm->lock);
> +	if (!lock_all_vcpus(dev->kvm))
> +		return -EBUSY;
> +
> +	mutex_lock(&dev->kvm->arch.config_lock);

Huh, that's fishy. The whole "lock the VM and then lock the individual
vcpus" dance is there to prevent a concurrent creation of a vcpu while
we're doing stuff that affects them all. Allowing a new vcpu to come
online while this sequence is happening is... unexpected.

Why do we need to drop the initial kvm->lock here? I'd expect the locks
to be completely cumulative.
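
To illustrate what I mean by cumulative (completely untested, just a
sketch of the shape I expected rather than a suggested diff):

	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* ... access the ITS registers ... */

	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

That way a vcpu cannot be created behind our back while the registers
are being accessed, and the config_lock still nests correctly inside
both the kvm->lock and the vcpu mutexes.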

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.
