[PATCH v2 6/6] arm64/signal: Consistently invalidate the in register FP state in restore
Mark Brown
broonie at kernel.org
Wed Dec 4 07:20:54 PST 2024
When restoring the SVE and SME specific floating point register states we
flush the task floating point state, marking the hardware state as stale so
that preemption does not result in us saving register state from the signal
handler on top of the restored context, and forcing a reload from memory.
For the plain FPSIMD state we don't do this; we just copy the state from
userspace and then force an immediate reload of the register state.
This isn't racy against a context switch since we copy the incoming data
onto the stack rather than directly into the task struct, but it's still
messy and inconsistent.
Simplify things and avoid a potential source of error by moving the
invalidation of the CPU state to the main restore_sigframe() and
reworking the restore of the FPSIMD state to update the task struct and
rely on loading as part of the general do_notify_resume() handling on
return to user, as we do for the SVE and SME state.
As a result of this, the only user of fpsimd_update_current_state() is
the 32 bit signal code, which should not have any SVE state, so add an
assert there that we don't have SVE enabled.
Signed-off-by: Mark Brown <broonie at kernel.org>
---
arch/arm64/kernel/fpsimd.c | 9 +++---
arch/arm64/kernel/signal.c | 70 +++++++++++++++-------------------------------
2 files changed, 27 insertions(+), 52 deletions(-)
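A condensed sketch of the resulting restore ordering, for readers skimming
the diff. This is illustrative only: error handling, the GP register restore
and the SVE/SME/ZA/ZT records are elided, and the diff below is the
authoritative change. All names used are existing kernel symbols.

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	struct user_ctxs user;
	int err = parse_user_sigframe(&user, sf);

	/*
	 * Invalidate the in-register FP state before copying new data
	 * into the task struct, so that a preempting context switch
	 * cannot save stale registers back over it.
	 */
	fpsimd_flush_task_state(current);

	if (err == 0 && system_supports_fpsimd())
		err = restore_fpsimd_context(&user);

	/*
	 * No fpsimd_update_current_state() any more: the registers are
	 * reloaded from current->thread via the TIF_FOREIGN_FPSTATE
	 * handling in do_notify_resume() on return to userspace.
	 */
	return err;
}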
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 2b045d4d8f71ace5bf01a596dda279285a0998a5..74080204073d06819838873996b8cb60043d89de 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1868,7 +1868,8 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
get_cpu_fpsimd_context();
current->thread.uw.fpsimd_state = *state;
- if (test_thread_flag(TIF_SVE))
+ /* This should only ever be used for 32 bit processes */
+ if (WARN_ON_ONCE(test_thread_flag(TIF_SVE)))
fpsimd_to_sve(current);
task_fpsimd_load();
@@ -1894,9 +1895,9 @@ void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
/*
- * If we don't support fpsimd, bail out after we have
- * reset the fpsimd_cpu for this task and clear the
- * FPSTATE.
+ * If we don't support fpsimd, bail out after we have reset
+ * the fpsimd_cpu for this task and clear the FPSTATE. We
+ * check here rather than forcing callers to check.
*/
if (!system_supports_fpsimd())
return;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 79c9c5cd0802149b3cde20b398617437d79181f2..335c2327baf74eac9634cf594855dbf26a7d6b01 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -271,7 +271,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
static int restore_fpsimd_context(struct user_ctxs *user)
{
- struct user_fpsimd_state fpsimd;
+ struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
int err = 0;
/* check the size information */
@@ -279,18 +279,14 @@ static int restore_fpsimd_context(struct user_ctxs *user)
return -EINVAL;
/* copy the FP and status/control registers */
- err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
- sizeof(fpsimd.vregs));
- __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
- __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);
+ err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
+ sizeof(fpsimd->vregs));
+ __get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
+ __get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);
clear_thread_flag(TIF_SVE);
current->thread.fp_type = FP_STATE_FPSIMD;
- /* load the hardware registers from the fpsimd_state structure */
- if (!err)
- fpsimd_update_current_state(&fpsimd);
-
return err ? -EFAULT : 0;
}
@@ -396,7 +392,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err = 0;
unsigned int vl, vq;
- struct user_fpsimd_state fpsimd;
+ struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
u16 user_vl, flags;
if (user->sve_size < sizeof(*user->sve))
@@ -439,16 +435,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.sve_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
-
sve_alloc(current, true);
if (!current->thread.sve_state) {
clear_thread_flag(TIF_SVE);
@@ -471,14 +457,10 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
fpsimd_only:
/* copy the FP and status/control registers */
/* restore_sigframe() already checked that user->fpsimd != NULL. */
- err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
- sizeof(fpsimd.vregs));
- __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
- __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
-
- /* load the hardware registers from the fpsimd_state structure */
- if (!err)
- fpsimd_update_current_state(&fpsimd);
+ err = __copy_from_user(fpsimd->vregs, user->fpsimd->vregs,
+ sizeof(fpsimd->vregs));
+ __get_user_error(fpsimd->fpsr, &user->fpsimd->fpsr, err);
+ __get_user_error(fpsimd->fpcr, &user->fpsimd->fpcr, err);
return err ? -EFAULT : 0;
}
@@ -587,16 +569,6 @@ static int restore_za_context(struct user_ctxs *user)
if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.sme_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
-
sme_alloc(current, true);
if (!current->thread.sme_state) {
current->thread.svcr &= ~SVCR_ZA_MASK;
@@ -664,16 +636,6 @@ static int restore_zt_context(struct user_ctxs *user)
if (nregs != 1)
return -EINVAL;
- /*
- * Careful: we are about __copy_from_user() directly into
- * thread.zt_state with preemption enabled, so protection is
- * needed to prevent a racing context switch from writing stale
- * registers back over the new data.
- */
-
- fpsimd_flush_task_state(current);
- /* From now, fpsimd_thread_switch() won't touch ZT in thread state */
-
err = __copy_from_user(thread_zt_state(&current->thread),
(char __user const *)user->zt +
ZT_SIG_REGS_OFFSET,
@@ -1028,6 +990,18 @@ static int restore_sigframe(struct pt_regs *regs,
if (err == 0)
err = parse_user_sigframe(&user, sf);
+ /*
+ * Careful: we are about to __copy_from_user() directly into
+ * thread floating point state with preemption enabled, so
+ * protection is needed to prevent a racing context switch
+ * from writing stale registers back over the new data. Mark
+ * the register floating point state as invalid and unbind the
+ * task from the CPU to force a reload before we return to
+ * userspace. fpsimd_flush_task_state() has a check for FP
+ * support.
+ */
+ fpsimd_flush_task_state(current);
+
if (err == 0 && system_supports_fpsimd()) {
if (!user.fpsimd)
return -EINVAL;
--
2.39.5