[PATCH v5 5/7] KVM: arm64: Always set HCR_TID2

Reiji Watanabe reijiw at google.com
Thu Jan 5 20:29:28 PST 2023


On Fri, Dec 30, 2022 at 1:55 AM Akihiko Odaki <akihiko.odaki at daynix.com> wrote:
>
> Always set HCR_TID2 to trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and
> CSSELR_EL1. This saves a few lines of code and allows their access
> trap handlers to be employed for purposes beyond those anticipated
> by the old condition for setting HCR_TID2.
>
> Suggested-by: Marc Zyngier <maz at kernel.org>
> Signed-off-by: Akihiko Odaki <akihiko.odaki at daynix.com>
> ---
>  arch/arm64/include/asm/kvm_arm.h           | 3 ++-
>  arch/arm64/include/asm/kvm_emulate.h       | 4 ----
>  arch/arm64/include/asm/kvm_host.h          | 2 --
>  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 2 --
>  4 files changed, 2 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index 8aa8492dafc0..44be46c280c1 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -81,11 +81,12 @@
>   * SWIO:       Turn set/way invalidates into set/way clean+invalidate
>   * PTW:                Take a stage2 fault if a stage1 walk steps in device memory
>   * TID3:       Trap EL1 reads of group 3 ID registers
> + * TID2:       Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
>   */
>  #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
>                          HCR_BSU_IS | HCR_FB | HCR_TACR | \
>                          HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
> -                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 )
> +                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID2)
>  #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
>  #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
>  #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index 9bdba47f7e14..30c4598d643b 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -88,10 +88,6 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
>         if (vcpu_el1_is_32bit(vcpu))
>                 vcpu->arch.hcr_el2 &= ~HCR_RW;
>
> -       if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
> -           vcpu_el1_is_32bit(vcpu))
> -               vcpu->arch.hcr_el2 |= HCR_TID2;
> -
>         if (kvm_has_mte(vcpu->kvm))
>                 vcpu->arch.hcr_el2 |= HCR_ATA;
>  }
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 45e2136322ba..cc2ede0eaed4 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -621,7 +621,6 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
>                 return false;
>
>         switch (reg) {
> -       case CSSELR_EL1:        *val = read_sysreg_s(SYS_CSSELR_EL1);   break;
>         case SCTLR_EL1:         *val = read_sysreg_s(SYS_SCTLR_EL12);   break;
>         case CPACR_EL1:         *val = read_sysreg_s(SYS_CPACR_EL12);   break;
>         case TTBR0_EL1:         *val = read_sysreg_s(SYS_TTBR0_EL12);   break;
> @@ -666,7 +665,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
>                 return false;
>
>         switch (reg) {
> -       case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    break;
>         case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    break;
>         case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    break;
>         case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    break;
> diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> index baa5b9b3dde5..147cb4c846c6 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> @@ -39,7 +39,6 @@ static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
>
>  static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
>  {
> -       ctxt_sys_reg(ctxt, CSSELR_EL1)  = read_sysreg(csselr_el1);
>         ctxt_sys_reg(ctxt, SCTLR_EL1)   = read_sysreg_el1(SYS_SCTLR);
>         ctxt_sys_reg(ctxt, CPACR_EL1)   = read_sysreg_el1(SYS_CPACR);
>         ctxt_sys_reg(ctxt, TTBR0_EL1)   = read_sysreg_el1(SYS_TTBR0);
> @@ -95,7 +94,6 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
>  static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
>  {
>         write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),     vmpidr_el2);
> -       write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1),    csselr_el1);
>
>         if (has_vhe() ||
>             !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
> --

Reviewed-by: Reiji Watanabe <reijiw at google.com>

Nit: access_csselr() can now use __vcpu_sys_reg() directly (instead
of going through vcpu_{write,read}_sys_reg()), as most code does
when vcpu_{write,read}_sys_reg() isn't needed.
The same comment applies to access_ccsidr(), which uses
vcpu_read_sys_reg() to get the CSSELR_EL1 value for the vCPU. A
sketch of the idea follows below.
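
For illustration only, an untested sketch of what the access_csselr()
side could look like (assuming that, with CSSELR_EL1 always trapped
after this patch, the shadow value in the vcpu's sysreg array is
always current):

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/*
	 * CSSELR_EL1 no longer lives on the CPU while the guest runs,
	 * so the in-memory copy can be accessed directly.
	 */
	if (p->is_write)
		__vcpu_sys_reg(vcpu, r->reg) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, r->reg);

	return true;
}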

Thank you,
Reiji


