[PATCH v1 05/12] arm64/sme: Remove _EL0 from name of SVCR - FIXME sysreg.h
Mark Rutland
mark.rutland at arm.com
Fri May 13 07:16:44 PDT 2022
On Tue, May 10, 2022 at 05:12:01PM +0100, Mark Brown wrote:
> The defines for SVCR call it SVCR_EL0; however, the architecture calls the
> register SVCR, with no _EL0 suffix. In preparation for generating the sysreg
> definitions, rename to match the architecture. No functional change.
>
> Signed-off-by: Mark Brown <broonie at kernel.org>
What's the "FIXME sysreg.h" in the title for? Is that an accidental
leftover, or is there something to do there?
Other than that this looks fine to me.
Thanks,
Mark.
> ---
> arch/arm64/include/asm/fpsimd.h | 4 ++--
> arch/arm64/include/asm/processor.h | 2 +-
> arch/arm64/include/asm/sysreg.h | 6 +++---
> arch/arm64/kernel/fpsimd.c | 26 +++++++++++++-------------
> arch/arm64/kernel/ptrace.c | 8 ++++----
> arch/arm64/kernel/signal.c | 14 +++++++-------
> arch/arm64/kernel/syscall.c | 4 ++--
> arch/arm64/kvm/fpsimd.c | 4 ++--
> arch/arm64/kvm/sys_regs.c | 2 +-
> 9 files changed, 35 insertions(+), 35 deletions(-)
>
> diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
> index aa11dbec0d70..9bb1873f5295 100644
> --- a/arch/arm64/include/asm/fpsimd.h
> +++ b/arch/arm64/include/asm/fpsimd.h
> @@ -67,12 +67,12 @@ extern void fpsimd_save_and_flush_cpu_state(void);
>
> static inline bool thread_sm_enabled(struct thread_struct *thread)
> {
> - return system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK);
> + return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
> }
>
> static inline bool thread_za_enabled(struct thread_struct *thread)
> {
> - return system_supports_sme() && (thread->svcr & SVCR_EL0_ZA_MASK);
> + return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
> }
>
> /* Maximum VL that SVE/SME VL-agnostic software can transparently support */
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index 69ce163d2fb2..8de5a4fc06e3 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -192,7 +192,7 @@ static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
>
> static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
> {
> - if (system_supports_sme() && (thread->svcr & SVCR_EL0_SM_MASK))
> + if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
> return thread_get_sme_vl(thread);
> else
> return thread_get_sve_vl(thread);
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 4459cd4a37f5..a2f0759f65b2 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -479,9 +479,9 @@
> #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
> #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
>
> -#define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2)
> -#define SVCR_EL0_ZA_MASK 2
> -#define SVCR_EL0_SM_MASK 1
> +#define SYS_SVCR sys_reg(3, 3, 4, 2, 2)
> +#define SVCR_ZA_MASK 2
> +#define SVCR_SM_MASK 1
>
> #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
> #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
> diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
> index 8b48e870e14e..fbffafb55552 100644
> --- a/arch/arm64/kernel/fpsimd.c
> +++ b/arch/arm64/kernel/fpsimd.c
> @@ -398,7 +398,7 @@ static void task_fpsimd_load(void)
> if (test_thread_flag(TIF_SME))
> sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
>
> - write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
> + write_sysreg_s(current->thread.svcr, SYS_SVCR);
>
> if (thread_za_enabled(&current->thread))
> za_load_state(current->thread.za_state);
> @@ -450,15 +450,15 @@ static void fpsimd_save(void)
>
> if (system_supports_sme()) {
> u64 *svcr = last->svcr;
> - *svcr = read_sysreg_s(SYS_SVCR_EL0);
> + *svcr = read_sysreg_s(SYS_SVCR);
>
> - *svcr = read_sysreg_s(SYS_SVCR_EL0);
> + *svcr = read_sysreg_s(SYS_SVCR);
>
> - if (*svcr & SYS_SVCR_EL0_ZA_MASK)
> + if (*svcr & SVCR_ZA_MASK)
> za_save_state(last->za_state);
>
> /* If we are in streaming mode override regular SVE. */
> - if (*svcr & SYS_SVCR_EL0_SM_MASK) {
> + if (*svcr & SVCR_SM_MASK) {
> save_sve_regs = true;
> save_ffr = system_supports_fa64();
> vl = last->sme_vl;
> @@ -840,8 +840,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
> sve_to_fpsimd(task);
>
> if (system_supports_sme() && type == ARM64_VEC_SME) {
> - task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
> - SYS_SVCR_EL0_ZA_MASK);
> + task->thread.svcr &= ~(SVCR_SM_MASK |
> + SVCR_ZA_MASK);
> clear_thread_flag(TIF_SME);
> }
>
> @@ -1890,10 +1890,10 @@ void __efi_fpsimd_begin(void)
> __this_cpu_write(efi_sve_state_used, true);
>
> if (system_supports_sme()) {
> - svcr = read_sysreg_s(SYS_SVCR_EL0);
> + svcr = read_sysreg_s(SYS_SVCR);
>
> if (!system_supports_fa64())
> - ffr = svcr & SVCR_EL0_SM_MASK;
> + ffr = svcr & SVCR_SM_MASK;
>
> __this_cpu_write(efi_sm_state, ffr);
> }
> @@ -1903,8 +1903,8 @@ void __efi_fpsimd_begin(void)
> ffr);
>
> if (system_supports_sme())
> - sysreg_clear_set_s(SYS_SVCR_EL0,
> - SVCR_EL0_SM_MASK, 0);
> + sysreg_clear_set_s(SYS_SVCR,
> + SVCR_SM_MASK, 0);
>
> } else {
> fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
> @@ -1937,9 +1937,9 @@ void __efi_fpsimd_end(void)
> */
> if (system_supports_sme()) {
> if (__this_cpu_read(efi_sm_state)) {
> - sysreg_clear_set_s(SYS_SVCR_EL0,
> + sysreg_clear_set_s(SYS_SVCR,
> 0,
> - SVCR_EL0_SM_MASK);
> + SVCR_SM_MASK);
> if (!system_supports_fa64())
> ffr = efi_sm_state;
> }
> diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
> index 60ebc3060cf1..21da83187a60 100644
> --- a/arch/arm64/kernel/ptrace.c
> +++ b/arch/arm64/kernel/ptrace.c
> @@ -867,10 +867,10 @@ static int sve_set_common(struct task_struct *target,
>
> switch (type) {
> case ARM64_VEC_SVE:
> - target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
> + target->thread.svcr &= ~SVCR_SM_MASK;
> break;
> case ARM64_VEC_SME:
> - target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
> + target->thread.svcr |= SVCR_SM_MASK;
> break;
> default:
> WARN_ON_ONCE(1);
> @@ -1100,7 +1100,7 @@ static int za_set(struct task_struct *target,
>
> /* If there is no data then disable ZA */
> if (!count) {
> - target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
> + target->thread.svcr &= ~SVCR_ZA_MASK;
> goto out;
> }
>
> @@ -1125,7 +1125,7 @@ static int za_set(struct task_struct *target,
>
> /* Mark ZA as active and let userspace use it */
> set_tsk_thread_flag(target, TIF_SME);
> - target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
> + target->thread.svcr |= SVCR_ZA_MASK;
>
> out:
> fpsimd_flush_task_state(target);
> diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
> index 2295948d97fd..18bf590dc1c7 100644
> --- a/arch/arm64/kernel/signal.c
> +++ b/arch/arm64/kernel/signal.c
> @@ -288,7 +288,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
>
> if (sve.head.size <= sizeof(*user->sve)) {
> clear_thread_flag(TIF_SVE);
> - current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
> + current->thread.svcr &= ~SVCR_SM_MASK;
> goto fpsimd_only;
> }
>
> @@ -321,7 +321,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
> return -EFAULT;
>
> if (sve.flags & SVE_SIG_FLAG_SM)
> - current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
> + current->thread.svcr |= SVCR_SM_MASK;
> else
> set_thread_flag(TIF_SVE);
>
> @@ -398,7 +398,7 @@ static int restore_za_context(struct user_ctxs __user *user)
> return -EINVAL;
>
> if (za.head.size <= sizeof(*user->za)) {
> - current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
> + current->thread.svcr &= ~SVCR_ZA_MASK;
> return 0;
> }
>
> @@ -419,7 +419,7 @@ static int restore_za_context(struct user_ctxs __user *user)
>
> sme_alloc(current);
> if (!current->thread.za_state) {
> - current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
> + current->thread.svcr &= ~SVCR_ZA_MASK;
> clear_thread_flag(TIF_SME);
> return -ENOMEM;
> }
> @@ -432,7 +432,7 @@ static int restore_za_context(struct user_ctxs __user *user)
> return -EFAULT;
>
> set_thread_flag(TIF_SME);
> - current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
> + current->thread.svcr |= SVCR_ZA_MASK;
>
> return 0;
> }
> @@ -922,8 +922,8 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
>
> /* Signal handlers are invoked with ZA and streaming mode disabled */
> if (system_supports_sme()) {
> - current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK |
> - SYS_SVCR_EL0_SM_MASK);
> + current->thread.svcr &= ~(SVCR_ZA_MASK |
> + SVCR_SM_MASK);
> sme_smstop();
> }
>
> diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
> index 92c69e5ac269..733451fe7e41 100644
> --- a/arch/arm64/kernel/syscall.c
> +++ b/arch/arm64/kernel/syscall.c
> @@ -174,9 +174,9 @@ static inline void fp_user_discard(void)
> * need updating.
> */
> if (system_supports_sme() && test_thread_flag(TIF_SME)) {
> - u64 svcr = read_sysreg_s(SYS_SVCR_EL0);
> + u64 svcr = read_sysreg_s(SYS_SVCR);
>
> - if (svcr & SYS_SVCR_EL0_SM_MASK)
> + if (svcr & SVCR_SM_MASK)
> sme_smstop_sm();
> }
>
> diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
> index 441edb9c398c..3d251a4d2cf7 100644
> --- a/arch/arm64/kvm/fpsimd.c
> +++ b/arch/arm64/kvm/fpsimd.c
> @@ -96,8 +96,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
> if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
> vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
>
> - if (read_sysreg_s(SYS_SVCR_EL0) &
> - (SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) {
> + if (read_sysreg_s(SYS_SVCR) &
> + (SVCR_SM_MASK | SVCR_ZA_MASK)) {
> vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
> fpsimd_save_and_flush_cpu_state();
> }
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index a4ef986adb5e..f7f494961eda 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1685,7 +1685,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> { SYS_DESC(SYS_SMIDR_EL1), undef_access },
> { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
> { SYS_DESC(SYS_CTR_EL0), access_ctr },
> - { SYS_DESC(SYS_SVCR_EL0), undef_access },
> + { SYS_DESC(SYS_SVCR), undef_access },
>
> { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
> .reset = reset_pmcr, .reg = PMCR_EL0 },
> --
> 2.30.2
>