[PATCH v13 2/5] arm64: add support for ARCH_HAS_COPY_MC
Tong Tiangen
tongtiangen at huawei.com
Sun Dec 8 18:42:54 PST 2024
For the arm64 kernel, when it processes hardware memory errors for
synchronous notifications (do_sea()), if the error is consumed within the
kernel, the current behavior is to panic. However, this is not optimal.
Take copy_from/to_user() for example: if a ld* instruction triggers a memory
error, even in kernel mode, only the associated process is affected. Killing
the user process and isolating the corrupted page is a better choice.
Add a new fixup type, EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR, to identify
instructions that can recover from memory errors triggered by accesses to
kernel memory. This fixup type is used in __arch_copy_to_user(), so that
the regular copy_to_user() handles kernel memory errors.
Signed-off-by: Tong Tiangen <tongtiangen at huawei.com>
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/asm-extable.h | 31 +++++++++++++++++++++++-----
arch/arm64/include/asm/asm-uaccess.h | 4 ++++
arch/arm64/include/asm/extable.h | 1 +
arch/arm64/lib/copy_to_user.S | 10 ++++-----
arch/arm64/mm/extable.c | 19 +++++++++++++++++
arch/arm64/mm/fault.c | 30 ++++++++++++++++++++-------
7 files changed, 78 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 100570a048c5..5fa54d31162c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
+ select ARCH_HAS_COPY_MC if ACPI_APEI_GHES
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index b8a5861dc7b7..0f9123efca0a 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -5,11 +5,13 @@
#include <linux/bits.h>
#include <asm/gpr-num.h>
-#define EX_TYPE_NONE 0
-#define EX_TYPE_BPF 1
-#define EX_TYPE_UACCESS_ERR_ZERO 2
-#define EX_TYPE_KACCESS_ERR_ZERO 3
-#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+#define EX_TYPE_NONE                             0
+#define EX_TYPE_BPF                              1
+#define EX_TYPE_UACCESS_ERR_ZERO                 2
+#define EX_TYPE_KACCESS_ERR_ZERO                 3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD           4
+/* kernel access memory error safe */
+#define EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR         5
/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
#define EX_DATA_REG_ERR_SHIFT 0
@@ -51,6 +53,17 @@
#define _ASM_EXTABLE_UACCESS(insn, fixup) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+#define _ASM_EXTABLE_KACCESS_ERR_ZERO_MEM_ERR(insn, fixup, err, zero) \
+ __ASM_EXTABLE_RAW(insn, fixup, \
+ EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR, \
+ ( \
+ EX_DATA_REG(ERR, err) | \
+ EX_DATA_REG(ZERO, zero) \
+ ))
+
+#define _ASM_EXTABLE_KACCESS_MEM_ERR(insn, fixup) \
+ _ASM_EXTABLE_KACCESS_ERR_ZERO_MEM_ERR(insn, fixup, wzr, wzr)
+
/*
* Create an exception table entry for uaccess `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
@@ -69,6 +82,14 @@
.endif
.endm
+/*
+ * Create an exception table entry for kaccess `insn`, which will branch to
+ * `fixup` when a hardware memory error is consumed.
+ */
+ .macro _asm_extable_kaccess_mem_err, insn, fixup
+ _ASM_EXTABLE_KACCESS_MEM_ERR(\insn, \fixup)
+ .endm
+
#else /* __ASSEMBLY__ */
#include <linux/stringify.h>
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5b6efe8abeeb..19aa0180f645 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -57,6 +57,10 @@ alternative_else_nop_endif
.endm
#endif
+#define KERNEL_MEM_ERR(l, x...) \
+9999: x; \
+ _asm_extable_kaccess_mem_err 9999b, l
+
#define USER(l, x...) \
9999: x; \
_asm_extable_uaccess 9999b, l
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 72b0e71cc3de..bc49443bc502 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
#endif /* !CONFIG_BPF_JIT */
bool fixup_exception(struct pt_regs *regs);
+bool fixup_exception_me(struct pt_regs *regs);
#endif
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 802231772608..bedab1678431 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -20,7 +20,7 @@
* x0 - bytes not copied
*/
.macro ldrb1 reg, ptr, val
- ldrb \reg, [\ptr], \val
+ KERNEL_MEM_ERR(9998f, ldrb \reg, [\ptr], \val)
.endm
.macro strb1 reg, ptr, val
@@ -28,7 +28,7 @@
.endm
.macro ldrh1 reg, ptr, val
- ldrh \reg, [\ptr], \val
+ KERNEL_MEM_ERR(9998f, ldrh \reg, [\ptr], \val)
.endm
.macro strh1 reg, ptr, val
@@ -36,7 +36,7 @@
.endm
.macro ldr1 reg, ptr, val
- ldr \reg, [\ptr], \val
+ KERNEL_MEM_ERR(9998f, ldr \reg, [\ptr], \val)
.endm
.macro str1 reg, ptr, val
@@ -44,7 +44,7 @@
.endm
.macro ldp1 reg1, reg2, ptr, val
- ldp \reg1, \reg2, [\ptr], \val
+ KERNEL_MEM_ERR(9998f, ldp \reg1, \reg2, [\ptr], \val)
.endm
.macro stp1 reg1, reg2, ptr, val
@@ -64,7 +64,7 @@ SYM_FUNC_START(__arch_copy_to_user)
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
- ldrb tmp1w, [srcin]
+KERNEL_MEM_ERR(9998f, ldrb tmp1w, [srcin])
USER(9998f, sttrb tmp1w, [dst])
add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 228d681a8715..9ad2b6473b60 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -72,7 +72,26 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
+ case EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR:
+ return false;
}
BUG();
}
+
+bool fixup_exception_me(struct pt_regs *regs)
+{
+ const struct exception_table_entry *ex;
+
+ ex = search_exception_tables(instruction_pointer(regs));
+ if (!ex)
+ return false;
+
+ switch (ex->type) {
+ case EX_TYPE_UACCESS_ERR_ZERO:
+ case EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR:
+ return ex_handler_uaccess_err_zero(ex, regs);
+ }
+
+ return false;
+}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ef63651099a9..278e67357f49 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -801,21 +801,35 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
return 1; /* "fault" */
}
+/*
+ * APEI claimed this as a firmware-first notification.
+ * Some processing deferred to task_work before ret_to_user().
+ */
+static int do_apei_claim_sea(struct pt_regs *regs)
+{
+ int ret;
+
+ ret = apei_claim_sea(regs);
+ if (ret)
+ return ret;
+
+ if (!user_mode(regs) && IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) {
+ if (!fixup_exception_me(regs))
+ return -ENOENT;
+ }
+
+ return ret;
+}
+
static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
- inf = esr_to_fault_info(esr);
-
- if (user_mode(regs) && apei_claim_sea(regs) == 0) {
- /*
- * APEI claimed this as a firmware-first notification.
- * Some processing deferred to task_work before ret_to_user().
- */
+ if (do_apei_claim_sea(regs) == 0)
return 0;
- }
+ inf = esr_to_fault_info(esr);
if (esr & ESR_ELx_FnV) {
siaddr = 0;
} else {
--
2.25.1