[RFC PATCH 2/6] riscv: add support for misaligned handling in S-mode
Clément Léger
cleger at rivosinc.com
Sat Jun 24 05:20:45 PDT 2023
Misalignment handling is currently only supported for M-mode and uses
direct accesses to user memory. In S-mode, user memory must instead be
accessed through the get_user()/put_user() accessors. Implement
load_u8(), store_u8() and get_insn() using these accessors. Also, use
CSR_TVAL instead of the hardcoded mtval in the csr_read() call so that
it works for both S-mode and M-mode. When used in S-mode, misaligned
accesses triggered from kernel mode are not handled.
Signed-off-by: Clément Léger <cleger at rivosinc.com>
---
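Note on the CSR_TVAL change: the generic CSR_* names are resolved at
build time in asm/csr.h, so the same csr_read(CSR_TVAL) picks the
mode-appropriate register. A simplified sketch of that aliasing (the
actual header covers many more CSRs):

#ifdef CONFIG_RISCV_M_MODE
# define CSR_TVAL	CSR_MTVAL	/* M-mode kernel: trap value is in mtval */
#else
# define CSR_TVAL	CSR_STVAL	/* S-mode kernel: trap value is in stval */
#endif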
arch/riscv/kernel/Makefile | 2 +-
arch/riscv/kernel/traps_misaligned.c | 111 +++++++++++++++++++++++----
2 files changed, 99 insertions(+), 14 deletions(-)
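For reference, a minimal user-space test (hypothetical, not part of
this series) that would exercise the new S-mode path on hardware where
misaligned loads trap; with the emulation added here it should print
the expected value instead of receiving SIGBUS:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[8] = { 0x78, 0x56, 0x34, 0x12, 0xef, 0xcd, 0xab, 0x90 };
	/* Point one byte past the start of the buffer to force a
	 * misaligned 32-bit load; volatile keeps the compiler from
	 * splitting it into byte accesses.
	 */
	volatile uint32_t *p = (volatile uint32_t *)(buf + 1);
	uint32_t v = *p;

	printf("misaligned load: 0x%08x\n", v);	/* expect 0xef123456 */
	return 0;
}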
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 153864e4f399..61bad09280a6 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -56,9 +56,9 @@ obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
obj-y += probes/
+obj-y += traps_misaligned.o
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e7bfb33089c1..e4a273ab77c9 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -12,6 +12,7 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
+#include <asm/bug.h>
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -151,21 +152,25 @@
#define PRECISION_S 0
#define PRECISION_D 1
-static inline u8 load_u8(const u8 *addr)
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(const u8 *addr, u8 *r_val)
{
u8 val;
asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+ *r_val = val;
- return val;
+ return 0;
}
-static inline void store_u8(u8 *addr, u8 val)
+static inline int store_u8(u8 *addr, u8 val)
{
asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+ return 0;
}
-static inline ulong get_insn(ulong mepc)
+static inline int get_insn(ulong mepc, ulong *r_insn)
{
register ulong __mepc asm ("a2") = mepc;
ulong val, rvc_mask = 3, tmp;
@@ -194,9 +199,63 @@ static inline ulong get_insn(ulong mepc)
: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
[xlen_minus_16] "i" (XLEN_MINUS_16));
- return val;
+ *r_insn = val;
+
+ return 0;
+}
+#else
+static inline int load_u8(const u8 *addr, u8 *r_val)
+{
+ return __get_user(*r_val, addr);
+}
+
+static inline int store_u8(u8 *addr, u8 val)
+{
+ return __put_user(val, addr);
}
+static inline int get_insn(ulong mepc, ulong *r_insn)
+{
+ ulong insn = 0;
+
+ if (mepc & 0x2) {
+ ulong tmp = 0;
+ u16 __user *insn_addr = (u16 __user *)mepc;
+
+ if (__get_user(insn, insn_addr))
+ return -EFAULT;
+ /* __get_user() uses a regular "lw" which sign extends the loaded
+ * value; make sure to clear the higher order bits in case we "or" it
+ * below with the upper 16-bit half.
+ */
+ insn &= GENMASK(15, 0);
+ if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn_addr++;
+ if (__get_user(tmp, insn_addr))
+ return -EFAULT;
+ *r_insn = (tmp << 16) | insn;
+
+ return 0;
+ } else {
+ u32 __user *insn_addr = (u32 __user *)mepc;
+
+ if (__get_user(insn, insn_addr))
+ return -EFAULT;
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn &= GENMASK(15, 0);
+ *r_insn = insn;
+
+ return 0;
+ }
+}
+#endif
+
union reg_data {
u8 data_bytes[8];
ulong data_ulong;
@@ -207,10 +266,21 @@ int handle_misaligned_load(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
+ unsigned long insn;
+ unsigned long addr;
int i, fp = 0, shift = 0, len = 0;
+ /*
+ * When running in supervisor mode, we only handle misaligned accesses
+ * triggered from user mode.
+ */
+ if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
+ return -1;
+
+ if (get_insn(epc, &insn))
+ return -1;
+
+ addr = csr_read(CSR_TVAL);
regs->epc = 0;
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -274,8 +344,10 @@ int handle_misaligned_load(struct pt_regs *regs)
}
val.data_u64 = 0;
- for (i = 0; i < len; i++)
- val.data_bytes[i] = load_u8((void *)(addr + i));
+ for (i = 0; i < len; i++) {
+ if (load_u8((void *)(addr + i), &val.data_bytes[i]))
+ return -1;
+ }
if (fp)
return -1;
@@ -290,10 +362,21 @@ int handle_misaligned_store(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
+ unsigned long insn;
+ unsigned long addr;
int i, len = 0;
+ /*
+ * When running in supervisor mode, we only handle misaligned accesses
+ * triggered from user mode.
+ */
+ if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
+ return -1;
+
+ if (get_insn(epc, &insn))
+ return -1;
+
+ addr = csr_read(CSR_TVAL);
regs->epc = 0;
val.data_ulong = GET_RS2(insn, regs);
@@ -327,8 +410,10 @@ int handle_misaligned_store(struct pt_regs *regs)
return -1;
}
- for (i = 0; i < len; i++)
- store_u8((void *)(addr + i), val.data_bytes[i]);
+ for (i = 0; i < len; i++) {
+ if (store_u8((void *)(addr + i), val.data_bytes[i]))
+ return -1;
+ }
regs->epc = epc + INSN_LEN(insn);
--
2.40.1