[PATCH v5 29/69] KVM: arm64: nv: Respect the virtual HCR_EL2.NV bit setting
Ganapatrao Kulkarni
gankulkarni at os.amperecomputing.com
Sun Dec 19 23:11:03 PST 2021
Hi Marc,
On 30-11-2021 01:31 am, Marc Zyngier wrote:
> From: Jintack Lim <jintack.lim at linaro.org>
>
> Forward traps due to HCR_EL2.NV bit to the virtual EL2 if they are not
> coming from the virtual EL2 and the virtual HCR_EL2.NV bit is set.
>
> In addition to EL2 register accesses, setting the NV bit will also make
> EL12 register accesses trap to EL2. To emulate this for the virtual EL2,
> forward traps due to EL12 register accesses to the virtual EL2 if the
> virtual HCR_EL2.NV bit is set.
>
> This is for recursive nested virtualization.
What does "recursive nested virtualization" mean here?
Are we going to advertise NV/NV1/NV2 support in the Guest-Hypervisor's
ID_AA64MMFR2_EL1 so that NV can also be used inside the Guest-Hypervisor?
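
To make the question concrete, here is a purely hypothetical sketch of the
kind of ID-register handling I am asking about. None of the names below
exist in this series as far as I can tell; only the ID_AA64MMFR2_EL1.NV
field layout (bits [27:24], where 0b0001 indicates FEAT_NV and 0b0010
indicates FEAT_NV2) is architectural.

/*
 * Hypothetical sketch only -- illustrating the question, not this patch.
 * The helper name and the policy flag are made up; only the
 * ID_AA64MMFR2_EL1.NV field (bits [27:24]) comes from the architecture.
 */
#include <linux/bits.h>
#include <linux/types.h>

#define MMFR2_NV_FIELD        GENMASK_ULL(27, 24)

/*
 * Compute the ID_AA64MMFR2_EL1 value presented to the guest hypervisor:
 * either hide the NV field entirely, or leave it visible so that the
 * guest hypervisor can in turn run nested guests of its own.
 */
static u64 guest_hyp_id_aa64mmfr2(u64 host_val, bool expose_recursive_nv)
{
        if (!expose_recursive_nv)
                host_val &= ~MMFR2_NV_FIELD;

        return host_val;
}

In other words: is the plan to leave the NV field visible to the
Guest-Hypervisor (so it can itself host a further level of virtualization),
or to hide it until that case is supported?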
>
> Signed-off-by: Jintack Lim <jintack.lim at linaro.org>
> [Moved code to emulate-nested.c]
> Signed-off-by: Christoffer Dall <christoffer.dall at arm.com>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
> ---
>  arch/arm64/include/asm/kvm_arm.h    |  1 +
>  arch/arm64/include/asm/kvm_nested.h |  2 ++
>  arch/arm64/kvm/emulate-nested.c     | 27 +++++++++++++++++++++++++++
>  arch/arm64/kvm/handle_exit.c        |  7 +++++++
>  arch/arm64/kvm/sys_regs.c           | 24 ++++++++++++++++++++++++
>  5 files changed, 61 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index 2eccf883e8fe..9759bc893a51 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -20,6 +20,7 @@
>  #define HCR_AMVOFFEN (UL(1) << 51)
>  #define HCR_FIEN     (UL(1) << 47)
>  #define HCR_FWB      (UL(1) << 46)
> +#define HCR_NV       (UL(1) << 42)
>  #define HCR_API      (UL(1) << 41)
>  #define HCR_APK      (UL(1) << 40)
>  #define HCR_TEA      (UL(1) << 37)
> diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
> index 4c2ac9650a3e..26cba7b4d743 100644
> --- a/arch/arm64/include/asm/kvm_nested.h
> +++ b/arch/arm64/include/asm/kvm_nested.h
> @@ -62,5 +62,7 @@ static inline u64 translate_cnthctl_el2_to_cntkctl_el1(u64 cnthctl)
> }
>
> int handle_wfx_nested(struct kvm_vcpu *vcpu, bool is_wfe);
> +extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
> +extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
>
> #endif /* __ARM64_KVM_NESTED_H */
> diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
> index 339e8272b01e..8c7f2fe24bc6 100644
> --- a/arch/arm64/kvm/emulate-nested.c
> +++ b/arch/arm64/kvm/emulate-nested.c
> @@ -25,11 +25,38 @@
>
> #include "trace.h"
>
> +bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
> +{
> +        bool control_bit_set;
> +
> +        if (!nested_virt_in_use(vcpu))
> +                return false;
> +
> +        control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
> +        if (!vcpu_mode_el2(vcpu) && control_bit_set) {
> +                kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
> +                return true;
> +        }
> +        return false;
> +}
> +
> +bool forward_nv_traps(struct kvm_vcpu *vcpu)
> +{
> +        return forward_traps(vcpu, HCR_NV);
> +}
> +
>  void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
>  {
>          u64 spsr, elr, mode;
>          bool direct_eret;
>
> +        /*
> +         * Forward this trap to the virtual EL2 if the virtual
> +         * HCR_EL2.NV bit is set and this is coming from !EL2.
> +         */
> +        if (forward_nv_traps(vcpu))
> +                return;
> +
>          /*
>           * Going through the whole put/load motions is a waste of time
>           * if this is a VHE guest hypervisor returning to its own
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 7721c7c36137..6ff709c124d0 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -64,6 +64,13 @@ static int handle_smc(struct kvm_vcpu *vcpu)
>  {
>          int ret;
>
> +        /*
> +         * Forward this trapped smc instruction to the virtual EL2 if
> +         * the guest has asked for it.
> +         */
> +        if (forward_traps(vcpu, HCR_TSC))
> +                return 1;
> +
>          /*
>           * "If an SMC instruction executed at Non-secure EL1 is
>           * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 6490b0e3dcaf..3468b8df8661 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -267,10 +267,19 @@ static u32 get_ccsidr(u32 csselr)
>          return ccsidr;
>  }
>
> +static bool el12_reg(struct sys_reg_params *p)
> +{
> +        /* All *_EL12 registers have Op1=5. */
> +        return (p->Op1 == 5);
> +}
> +
>  static bool access_rw(struct kvm_vcpu *vcpu,
>                        struct sys_reg_params *p,
>                        const struct sys_reg_desc *r)
>  {
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          if (p->is_write)
>                  vcpu_write_sys_reg(vcpu, p->regval, r->reg);
>          else
> @@ -283,6 +292,9 @@ static bool access_sctlr_el2(struct kvm_vcpu *vcpu,
>                               struct sys_reg_params *p,
>                               const struct sys_reg_desc *r)
>  {
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          if (p->is_write) {
>                  u64 val = p->regval;
>
> @@ -367,6 +379,9 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
>          bool was_enabled = vcpu_has_cache_enabled(vcpu);
>          u64 val, mask, shift;
>
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          /* We don't expect TRVM on the host */
>          BUG_ON(!vcpu_mode_el2(vcpu) && !p->is_write);
>
> @@ -1664,6 +1679,9 @@ static bool access_elr(struct kvm_vcpu *vcpu,
>                         struct sys_reg_params *p,
>                         const struct sys_reg_desc *r)
>  {
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          if (p->is_write)
>                  vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
>          else
> @@ -1676,6 +1694,9 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
>                          struct sys_reg_params *p,
>                          const struct sys_reg_desc *r)
>  {
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          if (p->is_write)
>                  __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
>          else
> @@ -1688,6 +1709,9 @@ static bool access_spsr_el2(struct kvm_vcpu *vcpu,
>                              struct sys_reg_params *p,
>                              const struct sys_reg_desc *r)
>  {
> +        if (el12_reg(p) && forward_nv_traps(vcpu))
> +                return false;
> +
>          if (p->is_write)
>                  vcpu_write_sys_reg(vcpu, p->regval, SPSR_EL2);
>          else
Thanks,
Ganapat