[PATCH 1/6] kvm: arm64: Prevent use of invalid PSCI v0.1 function IDs

Marc Zyngier maz at kernel.org
Tue Dec 8 10:56:39 EST 2020


On 2020-12-08 14:24, David Brazdil wrote:
> The PSCI driver exposes a struct containing the PSCI v0.1 function IDs
> configured in the DT. However, the struct does not convey whether these
> were set from the DT or still contain the default value of zero. This
> could be a problem for the PSCI proxy in KVM protected mode.
> 
> Extend the config passed to KVM with a bit mask whose individual bits
> are set depending on whether the corresponding function pointer in
> psci_ops is set, e.g. the bit for PSCI_CPU_SUSPEND is set if
> psci_ops.cpu_suspend != NULL.
> 
> Previously the config was split across multiple global variables. Put
> everything into a single struct for convenience.
> 
> Reported-by: Mark Rutland <mark.rutland at arm.com>
> Signed-off-by: David Brazdil <dbrazdil at google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h    | 20 +++++++++++
>  arch/arm64/kvm/arm.c                 | 14 +++++---
>  arch/arm64/kvm/hyp/nvhe/psci-relay.c | 53 +++++++++++++++++++++-------
>  3 files changed, 70 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 11beda85ee7e..828d50d40dc2 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -17,6 +17,7 @@
>  #include <linux/jump_label.h>
>  #include <linux/kvm_types.h>
>  #include <linux/percpu.h>
> +#include <linux/psci.h>
>  #include <asm/arch_gicv3.h>
>  #include <asm/barrier.h>
>  #include <asm/cpufeature.h>
> @@ -240,6 +241,25 @@ struct kvm_host_data {
>  	struct kvm_pmu_events pmu_events;
>  };
> 
> +#define KVM_HOST_PSCI_0_1_CPU_SUSPEND	BIT(0)
> +#define KVM_HOST_PSCI_0_1_CPU_ON	BIT(1)
> +#define KVM_HOST_PSCI_0_1_CPU_OFF	BIT(2)
> +#define KVM_HOST_PSCI_0_1_MIGRATE	BIT(3)
> +
> +struct kvm_host_psci_config {
> +	/* PSCI version used by host. */
> +	u32 version;
> +
> +	/* Function IDs used by host if version is v0.1. */
> +	struct psci_0_1_function_ids function_ids_0_1;
> +
> +	/* Bitmask of functions enabled for v0.1, bits KVM_HOST_PSCI_0_1_*. */
> +	unsigned int enabled_functions_0_1;

Nit: the conventional type for bitmaps is 'unsigned long'.
Also, "enabled" seems odd. Isn't it actually "available"?
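For instance, something along these lines is what I have in mind (just a
sketch, the exact name is of course up to you):

	/* Bitmask of v0.1 functions available on the host (KVM_HOST_PSCI_0_1_*). */
	unsigned long available_functions_0_1;	/* name only a suggestion */

with the helpers on the hyp side renamed to match.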

> +};
> +
> +extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
> +#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
> +
>  struct vcpu_reset_state {
>  	unsigned long	pc;
>  	unsigned long	r0;
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 6e637d2b4cfb..6a2f4e01b04f 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -66,8 +66,6 @@ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
>  DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
> 
>  extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
> -extern u32 kvm_nvhe_sym(kvm_host_psci_version);
> -extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
> 
>  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
>  {
> @@ -1618,8 +1616,16 @@ static bool init_psci_relay(void)
>  		return false;
>  	}
> 
> -	kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
> -	kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
> +	kvm_host_psci_config.version = psci_ops.get_version();
> +
> +	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
> +		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
> +		kvm_host_psci_config.enabled_functions_0_1 =
> +			(psci_ops.cpu_suspend ? KVM_HOST_PSCI_0_1_CPU_SUSPEND : 0) |
> +			(psci_ops.cpu_off ? KVM_HOST_PSCI_0_1_CPU_OFF : 0) |
> +			(psci_ops.cpu_on ? KVM_HOST_PSCI_0_1_CPU_ON : 0) |
> +			(psci_ops.migrate ? KVM_HOST_PSCI_0_1_MIGRATE : 0);
> +	}
>  	return true;
>  }
> 
> diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> index 08dc9de69314..0d6f4aa39621 100644
> --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
> @@ -22,9 +22,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
>  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
> 
>  /* Config options set by the host. */
> -__ro_after_init u32 kvm_host_psci_version;
> -__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
> -__ro_after_init s64 hyp_physvirt_offset;
> +struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
> +s64 __ro_after_init hyp_physvirt_offset;

Unrelated change?

> 
>  #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
> 
> @@ -54,12 +53,41 @@ static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
>  	return func_id;
>  }
> 
> +static inline bool is_psci_0_1_function_enabled(unsigned int fn_bit)

Don't bother with "inline" outside of an include file. It really
doesn't mean much (the compiler is free to ignore it), and it is
likely that the compiler will optimise better without guidance
(not to mention this is hardly a fast path anyway).
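i.e. just:

	static bool is_psci_0_1_function_enabled(unsigned int fn_bit)
	{
		return kvm_host_psci_config.enabled_functions_0_1 & fn_bit;
	}

is plenty; the compiler will inline it if it decides it is worth it.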

> +{
> +	return kvm_host_psci_config.enabled_functions_0_1 & fn_bit;
> +}
> +
> +static inline bool is_psci_0_1_cpu_suspend(u64 func_id)
> +{
> +	return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_SUSPEND) &&
> +	       (func_id == kvm_host_psci_config.function_ids_0_1.cpu_suspend);
> +}
> +
> +static inline bool is_psci_0_1_cpu_on(u64 func_id)
> +{
> +	return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_ON) &&
> +	       (func_id == kvm_host_psci_config.function_ids_0_1.cpu_on);
> +}
> +
> +static inline bool is_psci_0_1_cpu_off(u64 func_id)
> +{
> +	return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_CPU_OFF) &&
> +	       (func_id == kvm_host_psci_config.function_ids_0_1.cpu_off);
> +}
> +
> +static inline bool is_psci_0_1_migrate(u64 func_id)
> +{
> +	return is_psci_0_1_function_enabled(KVM_HOST_PSCI_0_1_MIGRATE) &&
> +	       (func_id == kvm_host_psci_config.function_ids_0_1.migrate);
> +}
> +
>  static bool is_psci_0_1_call(u64 func_id)
>  {
> -	return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
> -	       (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
> -	       (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
> -	       (func_id == kvm_host_psci_0_1_function_ids.migrate);
> +	return is_psci_0_1_cpu_suspend(func_id) ||
> +	       is_psci_0_1_cpu_on(func_id) ||
> +	       is_psci_0_1_cpu_off(func_id) ||
> +	       is_psci_0_1_migrate(func_id);
>  }
> 
>  static bool is_psci_0_2_call(u64 func_id)
> @@ -71,7 +99,7 @@ static bool is_psci_0_2_call(u64 func_id)
> 
>  static bool is_psci_call(u64 func_id)
>  {
> -	switch (kvm_host_psci_version) {
> +	switch (kvm_host_psci_config.version) {
>  	case PSCI_VERSION(0, 1):
>  		return is_psci_0_1_call(func_id);
>  	default:
> @@ -248,12 +276,11 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
> 
>  static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
>  {
> -	if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
> -	    (func_id == kvm_host_psci_0_1_function_ids.migrate))
> +	if (is_psci_0_1_cpu_off(func_id) || is_psci_0_1_migrate(func_id))
>  		return psci_forward(host_ctxt);
> -	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
> +	else if (is_psci_0_1_cpu_on(func_id))
>  		return psci_cpu_on(func_id, host_ctxt);
> -	else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
> +	else if (is_psci_0_1_cpu_suspend(func_id))
>  		return psci_cpu_suspend(func_id, host_ctxt);
>  	else
>  		return PSCI_RET_NOT_SUPPORTED;
> @@ -304,7 +331,7 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
>  	if (!is_psci_call(func_id))
>  		return false;
> 
> -	switch (kvm_host_psci_version) {
> +	switch (kvm_host_psci_config.version) {
>  	case PSCI_VERSION(0, 1):
>  		ret = psci_0_1_handler(func_id, host_ctxt);
>  		break;

Otherwise looks OK. Don't bother respinning the series for my comments;
I can tidy things up as I apply it if there are no other issues.

Thanks,

         M.
-- 
Jazz is not dead. It just smells funny...


