[RFC PATCH v5 19/38] KVM: arm64: Do not run a VCPU on a CPU without SPE

Alexandru Elisei alexandru.elisei at arm.com
Mon Jan 10 03:40:26 PST 2022


Hello,

This patch will be dropped in the next iteration, and instead I'll
implement the same approach that PMU emulation uses, which is
currently being worked on [1].

Prospective reviewers can safely ignore this patch.

[1] https://lore.kernel.org/linux-arm-kernel/20211213152309.158462-1-alexandru.elisei@arm.com

Thanks,
Alex
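
For readers following along, the quoted patch below keeps a per-VCPU
cpumask of the CPUs that implement SPE and makes KVM_RUN fail with
-ENOEXEC when the VCPU was loaded on a CPU outside that set. The
userspace-only sketch below models that check with made-up toy_vcpu_*
helpers and a cpu_set_t standing in for the kernel cpumask; it is an
illustration of the mechanism, not kernel code:

/*
 * Userspace model of the checks the quoted patch adds: a VCPU carries a
 * mask of CPUs it may run on (for SPE, the CPUs that implement FEAT_SPE),
 * and running it on a CPU outside that mask is refused with -ENOEXEC.
 * All names here are illustrative only.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

struct toy_vcpu {
	cpu_set_t supported_cpus;	/* models vcpu->arch.supported_cpus */
	int cpu_not_supported;		/* models vcpu->arch.cpu_not_supported */
};

/* Models the check added to kvm_arch_vcpu_load(): latch a flag if the
 * current physical CPU is not in the VCPU's supported set. */
static void toy_vcpu_load(struct toy_vcpu *vcpu, int cpu)
{
	if (CPU_COUNT(&vcpu->supported_cpus) &&
	    !CPU_ISSET(cpu, &vcpu->supported_cpus))
		vcpu->cpu_not_supported = 1;
}

/* Models the check added to kvm_arch_vcpu_ioctl_run(): bail out before
 * entering the guest if the flag was latched at load time. */
static int toy_vcpu_run(struct toy_vcpu *vcpu)
{
	if (vcpu->cpu_not_supported) {
		vcpu->cpu_not_supported = 0;
		return -ENOEXEC;
	}
	return 0;	/* would enter the guest here */
}

int main(void)
{
	struct toy_vcpu vcpu = { .cpu_not_supported = 0 };

	CPU_ZERO(&vcpu.supported_cpus);
	CPU_SET(0, &vcpu.supported_cpus);	/* pretend only CPU 0 has SPE */

	toy_vcpu_load(&vcpu, sched_getcpu());
	printf("KVM_RUN would return %d\n", toy_vcpu_run(&vcpu));
	return 0;
}

Run on CPU 0 this prints 0; pinned to any other CPU (e.g. with taskset)
it prints -8 (-ENOEXEC), mirroring what a VMM would see from the real
ioctl on a CPU without SPE.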

On Wed, Nov 17, 2021 at 03:38:23PM +0000, Alexandru Elisei wrote:
> The kernel allows heterogeneous systems where FEAT_SPE is not present on
> all CPUs. This presents a challenge for KVM, as it will have to touch the
> SPE registers when emulating SPE for a guest, and those accesses will cause
> an undefined exception if SPE is not present on the CPU.
> 
> Avoid this situation by keeping a cpumask of CPUs that the VCPU is
> allowed to run on, which for SPE is the set of all CPUs that support
> SPE, and by refusing to run the VCPU on a CPU which is not part of the
> cpumask.
> 
> Signed-off-by: Alexandru Elisei <alexandru.elisei at arm.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |  3 +++
>  arch/arm64/kvm/arm.c              | 15 +++++++++++++++
>  arch/arm64/kvm/spe.c              |  2 ++
>  3 files changed, 20 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 8b3faed48914..96ce98f6135d 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -405,6 +405,9 @@ struct kvm_vcpu_arch {
>  		u64 last_steal;
>  		gpa_t base;
>  	} steal;
> +
> +	cpumask_var_t supported_cpus;
> +	bool cpu_not_supported;
>  };
>  
>  /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index b2997b919be2..8a7c01d1df58 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -351,6 +351,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>  
>  	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
>  
> +	if (!zalloc_cpumask_var(&vcpu->arch.supported_cpus, GFP_KERNEL))
> +		return -ENOMEM;
> +
>  	/* Set up the timer */
>  	kvm_timer_vcpu_init(vcpu);
>  
> @@ -378,6 +381,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
>  	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
>  		static_branch_dec(&userspace_irqchip_in_use);
>  
> +	free_cpumask_var(vcpu->arch.supported_cpus);
>  	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
>  	kvm_timer_vcpu_terminate(vcpu);
>  	kvm_pmu_vcpu_destroy(vcpu);
> @@ -456,6 +460,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	if (vcpu_has_ptrauth(vcpu))
>  		vcpu_ptrauth_disable(vcpu);
>  	kvm_arch_vcpu_load_debug_state_flags(vcpu);
> +
> +	if (!cpumask_empty(vcpu->arch.supported_cpus) &&
> +	    !cpumask_test_cpu(smp_processor_id(), vcpu->arch.supported_cpus))
> +		vcpu->arch.cpu_not_supported = true;
>  }
>  
>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> @@ -893,6 +901,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
>  		 */
>  		preempt_disable();
>  
> +		if (unlikely(vcpu->arch.cpu_not_supported)) {
> +			vcpu->arch.cpu_not_supported = false;
> +			ret = -ENOEXEC;
> +			preempt_enable();
> +			continue;
> +		}
> +
>  		kvm_pmu_flush_hwstate(vcpu);
>  
>  		local_irq_disable();
> diff --git a/arch/arm64/kvm/spe.c b/arch/arm64/kvm/spe.c
> index 7c6f94358cc1..f3863728bab6 100644
> --- a/arch/arm64/kvm/spe.c
> +++ b/arch/arm64/kvm/spe.c
> @@ -40,5 +40,7 @@ int kvm_spe_vcpu_enable_spe(struct kvm_vcpu *vcpu)
>  	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT))
>  		return -EINVAL;
>  
> +	cpumask_copy(vcpu->arch.supported_cpus, &supported_cpus);
> +
>  	return 0;
>  }
> -- 
> 2.33.1
> 
> _______________________________________________
> kvmarm mailing list
> kvmarm at lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


