[PATCH v2 3/6] RISC-V: Check scalar unaligned access on all CPUs
Jesse Taube
jesse at rivosinc.com
Thu Jun 13 12:16:12 PDT 2024
Originally, the check_unaligned_access_emulated_all_cpus() function
only checked the boot hart. Fix it so that unaligned access is checked
on all harts.

Check for Zicclsm before probing for unaligned access. This greatly
reduces boot time, as measuring the access speed is no longer necessary
when Zicclsm is present.
Signed-off-by: Jesse Taube <jesse at rivosinc.com>
---
V1 -> V2:
- New patch
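(Illustrative only, not part of the patch.) Once the per-CPU
misaligned_access_speed values are populated, userspace can observe the
result through the riscv_hwprobe(2) syscall; on a kernel with this patch
and Zicclsm present it should report RISCV_HWPROBE_MISALIGNED_FAST.
Rough sketch, assuming kernel headers new enough to provide
__NR_riscv_hwprobe and <asm/hwprobe.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>   /* struct riscv_hwprobe, RISCV_HWPROBE_* */

int main(void)
{
	/* Query the misaligned-access performance key for all online CPUs
	 * (cpusetsize == 0, cpus == NULL). */
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0)) {
		perror("riscv_hwprobe");
		return 1;
	}

	switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
	case RISCV_HWPROBE_MISALIGNED_FAST:
		puts("misaligned accesses: fast");
		break;
	case RISCV_HWPROBE_MISALIGNED_EMULATED:
		puts("misaligned accesses: emulated");
		break;
	case RISCV_HWPROBE_MISALIGNED_SLOW:
		puts("misaligned accesses: slow");
		break;
	default:
		puts("misaligned accesses: unknown/unsupported");
	}
	return 0;
}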
---
arch/riscv/kernel/traps_misaligned.c | 23 ++++++----------------
arch/riscv/kernel/unaligned_access_speed.c | 23 +++++++++++++---------
2 files changed, 20 insertions(+), 26 deletions(-)
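(Also illustrative, not part of the patch.) The diff below relies on
schedule_on_each_cpu() to run the emulation probe once on every online
CPU and collect the result in a per-CPU variable. The same pattern,
reduced to a self-contained out-of-tree module sketch with made-up
names (probe_result, probe_one_cpu):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(long, probe_result);

/* Runs once on each online CPU; the per-CPU kworker executing it is
 * bound to that CPU, so smp_processor_id() is stable here. */
static void probe_one_cpu(struct work_struct *unused)
{
	this_cpu_write(probe_result, (long)smp_processor_id());
}

static int __init probe_init(void)
{
	int cpu;

	/* Blocks until the work item has finished on every online CPU. */
	schedule_on_each_cpu(probe_one_cpu);

	for_each_online_cpu(cpu)
		pr_info("cpu %d: result %ld\n", cpu,
			per_cpu(probe_result, cpu));

	return 0;
}

static void __exit probe_exit(void)
{
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");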
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index b62d5a2f4541..8fadbe00dd62 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -526,31 +526,17 @@ int handle_misaligned_store(struct pt_regs *regs)
return 0;
}
-static bool check_unaligned_access_emulated(int cpu)
+static void check_unaligned_access_emulated(struct work_struct *unused)
{
+ int cpu = smp_processor_id();
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
unsigned long tmp_var, tmp_val;
- bool misaligned_emu_detected;
*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
__asm__ __volatile__ (
" "REG_L" %[tmp], 1(%[ptr])\n"
: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
-
- misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
- /*
- * If unaligned_ctl is already set, this means that we detected that all
- * CPUS uses emulated misaligned access at boot time. If that changed
- * when hotplugging the new cpu, this is something we don't handle.
- */
- if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
- pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
- while (true)
- cpu_relax();
- }
-
- return misaligned_emu_detected;
}
bool check_unaligned_access_emulated_all_cpus(void)
@@ -562,8 +548,11 @@ bool check_unaligned_access_emulated_all_cpus(void)
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
+ schedule_on_each_cpu(check_unaligned_access_emulated);
+
for_each_online_cpu(cpu)
- if (!check_unaligned_access_emulated(cpu))
+ if (per_cpu(misaligned_access_speed, cpu)
+ != RISCV_HWPROBE_MISALIGNED_EMULATED)
return false;
unaligned_ctl = true;
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index a9a6bcb02acf..70c1588fc353 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -259,23 +259,28 @@ static int check_unaligned_access_speed_all_cpus(void)
kfree(bufs);
return 0;
}
+#endif /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
static int check_unaligned_access_all_cpus(void)
{
- bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
+ bool all_cpus_emulated;
+ int cpu;
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICCLSM)) {
+ for_each_online_cpu(cpu) {
+ per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
+ }
+ return 0;
+ }
+
+ all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
+
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
if (!all_cpus_emulated)
return check_unaligned_access_speed_all_cpus();
+#endif
return 0;
}
-#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
-static int check_unaligned_access_all_cpus(void)
-{
- check_unaligned_access_emulated_all_cpus();
-
- return 0;
-}
-#endif
arch_initcall(check_unaligned_access_all_cpus);
--
2.43.0