[PATCH v1 23/24] kvm: arm64: Trap host SMCs in protected mode.

Marc Zyngier <maz@kernel.org>
Tue Nov 10 04:03:15 EST 2020


On 2020-11-09 11:32, David Brazdil wrote:
> While protected nVHE KVM is installed, start trapping all host SMCs.
> By default, these are simply forwarded to EL3, but PSCI SMCs are
> validated first.
> 
> Create new constant HCR_HOST_NVHE_PROTECTED_FLAGS with the new set of
> HCR flags to use while the nVHE vector is installed when the kernel was
> booted with the protected flag enabled. Switch back to the default HCR
> flags when switching back to the stub vector.
> 
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/kvm_arm.h   |  1 +
>  arch/arm64/kernel/image-vars.h     |  4 ++++
>  arch/arm64/kvm/arm.c               | 35 ++++++++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/nvhe/hyp-init.S |  8 +++++++
>  arch/arm64/kvm/hyp/nvhe/switch.c   |  5 ++++-
>  5 files changed, 52 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index 64ce29378467..4e90c2debf70 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -80,6 +80,7 @@
>  			 HCR_FMO | HCR_IMO | HCR_PTW )
>  #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
>  #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
> +#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
>  #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
> 
>  /* TCR_EL2 Registers bits */
> diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
> index 78a42a7cdb72..75cda51674f4 100644
> --- a/arch/arm64/kernel/image-vars.h
> +++ b/arch/arm64/kernel/image-vars.h
> @@ -62,9 +62,13 @@ __efistub__ctype		= _ctype;
>   */
> 
>  /* Alternative callbacks for init-time patching of nVHE hyp code. */
> +KVM_NVHE_ALIAS(kvm_patch_hcr_flags);
>  KVM_NVHE_ALIAS(kvm_patch_vector_branch);
>  KVM_NVHE_ALIAS(kvm_update_va_mask);
> 
> +/* Static key enabled when the user opted into nVHE protected mode. */
> +KVM_NVHE_ALIAS(kvm_protected_mode);
> +
>  /* Global kernel state accessed by nVHE hyp code. */
>  KVM_NVHE_ALIAS(kvm_vgic_global_state);
> 
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 574aa2d026e6..c09b95cfa00a 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1861,6 +1861,41 @@ void kvm_arch_exit(void)
>  	kvm_perf_teardown();
>  }
> 
> +static inline u32 __init __gen_mov_hcr_insn(u64 hcr, u32 rd, int i)
> +{
> +	int shift = 48 - (i * 16);
> +	u16 imm = (hcr >> shift) & GENMASK(16, 0);

I really doubt you want to encode 17 bits.
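
GENMASK(16, 0) is a 17-bit mask. Not that it changes the result here,
since imm is a u16 and the truncation masks for you, but if the mask
stays it should at least match the width:

	u16 imm = (hcr >> shift) & GENMASK(15, 0);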

> +
> +	return aarch64_insn_gen_movewide(rd, imm, shift,
> +					 AARCH64_INSN_VARIANT_64BIT,
> +					 (i == 0) ? AARCH64_INSN_MOVEWIDE_ZERO
> +						  : AARCH64_INSN_MOVEWIDE_KEEP);
> +}

I've added a generate_mov_q() helper as part of my host EL2 entry rework.
We can probably share some stuff here.
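
Something along these lines (the details of my version may differ by
the time it lands), which would also keep the "always 4 instructions"
assumption in one place:

	static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr,
				   int nr_inst)
	{
		u32 rd;
		int i;

		BUG_ON(nr_inst != 4);

		/* Re-use the destination register of the original movz */
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
						  le32_to_cpu(origptr[0]));

		/* movz rd, #g0; movk rd, #g1, lsl #16; and so on */
		for (i = 0; i < nr_inst; i++) {
			int shift = i * 16;
			u16 imm = (val >> shift) & GENMASK(15, 0);
			u32 insn = aarch64_insn_gen_movewide(rd, imm, shift,
					AARCH64_INSN_VARIANT_64BIT,
					i == 0 ? AARCH64_INSN_MOVEWIDE_ZERO
					       : AARCH64_INSN_MOVEWIDE_KEEP);

			updptr[i] = cpu_to_le32(insn);
		}
	}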

> +
> +void __init kvm_patch_hcr_flags(struct alt_instr *alt,
> +				__le32 *origptr, __le32 *updptr, int nr_inst)
> +{
> +	int i;
> +	u32 rd;
> +
> +	BUG_ON(nr_inst != 4);
> +
> +	/* Skip for VHE and unprotected nVHE modes. */
> +	if (!is_kvm_protected_mode())
> +		return;
> +
> +	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
> +					  le32_to_cpu(origptr[0]));
> +
> +	for (i = 0; i < nr_inst; i++) {
> +		u32 oinsn = __gen_mov_hcr_insn(HCR_HOST_NVHE_FLAGS, rd, i);
> +		u32 insn = __gen_mov_hcr_insn(HCR_HOST_NVHE_PROTECTED_FLAGS, rd, i);
> +
> +		BUG_ON(oinsn != le32_to_cpu(origptr[i]));
> +		updptr[i] = cpu_to_le32(insn);
> +	}
> +}
> +
>  static int __init early_kvm_protected_cfg(char *buf)
>  {
>  	bool val;
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> index f999a35b2c8c..bbe6c5f558e0 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> @@ -88,6 +88,12 @@ SYM_CODE_END(__kvm_hyp_init)
>   * x0: struct kvm_nvhe_init_params PA
>   */
>  SYM_CODE_START(___kvm_hyp_init)
> +alternative_cb kvm_patch_hcr_flags
> +	mov_q	x1, HCR_HOST_NVHE_FLAGS

You really want to be careful here: the mov_q macro expands to 2, 3 or 4
instructions, depending on the input data...
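
For these particular values it happens to be 4 (HCR_ATA sets bit 56,
forcing the long form), but nothing ties that to the BUG_ON(nr_inst != 4)
in your callback. If the length must stay fixed, spelling out the four
moves (this mirrors what mov_q does internally, untested) would be
safer:

	movz	x1, :abs_g3:HCR_HOST_NVHE_FLAGS
	movk	x1, :abs_g2_nc:HCR_HOST_NVHE_FLAGS
	movk	x1, :abs_g1_nc:HCR_HOST_NVHE_FLAGS
	movk	x1, :abs_g0_nc:HCR_HOST_NVHE_FLAGS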

It is also odd that you have both a static key and a patching alternative.
Why isn't "protected KVM" a capability that can be evaluated as a
non-patching alternative? In general, I'd like to reserve patching
alternatives to values that cannot be evaluated at compile time (VM
offsets, for example).
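
Concretely, with a cpucap (say ARM64_KVM_PROTECTED_MODE, which doesn't
exist yet), this whole dance reduces to:

	alternative_if ARM64_KVM_PROTECTED_MODE
		mov_q	x1, HCR_HOST_NVHE_PROTECTED_FLAGS
	alternative_else
		mov_q	x1, HCR_HOST_NVHE_FLAGS
	alternative_endif

(both constants currently expand to the same four instructions, which
is what the alternative framework requires), and no callback or
instruction generation is needed at all.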

> +alternative_cb_end
> +	msr	hcr_el2, x1
> +	isb
> +
>  	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
>  	msr	tpidr_el2, x1
> 
> @@ -220,6 +226,8 @@ reset:
>  	bic	x5, x5, x6		// Clear SCTL_M and etc
>  	pre_disable_mmu_workaround
>  	msr	sctlr_el2, x5
> +	mov_q	x5, HCR_HOST_NVHE_FLAGS
> +	msr	hcr_el2, x5
>  	isb
> 
>  	/* Install stub vectors */
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index 8ae8160bc93a..f605b25a9afc 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -96,7 +96,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
>  	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
> 
>  	write_sysreg(mdcr_el2, mdcr_el2);
> -	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
> +	if (is_kvm_protected_mode())
> +		write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
> +	else
> +		write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
>  	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
>  	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
>  }
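
Same remark for this hunk: with the hypothetical cap above, this could
simply be

	if (cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE))
		write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
	else
		write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);

without having to alias a static key into the hyp object.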

Thanks,

         M.
-- 
Jazz is not dead. It just smells funny...


