[PATCH v2 12/13] arm64: extable: add load_unaligned_zeropad() handler

Mark Rutland <mark.rutland@arm.com>
Tue Oct 19 09:02:18 PDT 2021


For inline assembly, we place exception fixups out-of-line in the
`.fixup` section so that they are out of the way of the fast path.
This has a few drawbacks:

* Since the fixup code is anonymous, backtraces will symbolize fixups as
  offsets from the nearest prior symbol, currently
  `__entry_tramp_text_end`. This is confusing, and painful to debug
  without access to the relevant vmlinux.

* Since the exception handler adjusts the PC to execute the fixup, and
  the fixup uses a direct branch back into the function it fixes,
  backtraces of fixups miss the original function. This is confusing,
  and violates requirements for RELIABLE_STACKTRACE (and therefore
  LIVEPATCH).

* Inline assembly and associated fixups are generated from templates,
  and we have many copies of logically identical fixups which only
  differ in which specific registers are written to and which address is
  branched to at the end of the fixup. This is potentially wasteful of
  I-cache resources, and makes it hard to add additional logic to fixups
  without significant bloat.

* In the case of load_unaligned_zeropad(), the logic in the fixup
  requires a temporary register that we must allocate even on the fast
  path, where it will not be used.

This patch addresses all four concerns for load_unaligned_zeropad()
fixups by adding a dedicated exception handler which performs the fixup
logic in exception context and then returns to the instruction after
the faulting load. For the moment, the fixup logic is identical to the
old assembly fixup logic, but in future we could enhance this by taking
the ESR and FAR into account to constrain the faults we try to fix up,
or to specialize fixups for MTE tag check faults.
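
As an illustration (not part of the patch), the arithmetic the handler
performs is equivalent to the following standalone C sketch, assuming a
little-endian host with 8-byte longs as on arm64; zeropad_fixup() and
the example values are hypothetical:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /*
   * Model of the fixup: align the faulting address down (the aligned
   * load cannot cross a page boundary), load the whole word, then
   * shift the wanted bytes to the bottom. Zeros enter from the top,
   * giving the "zeropad" behaviour; big-endian shifts left instead.
   */
  static uint64_t zeropad_fixup(uintptr_t addr)
  {
          uintptr_t offset = addr & 0x7UL;
          uint64_t data;

          memcpy(&data, (const void *)(addr & ~0x7UL), sizeof(data));
          return data >> (8 * offset);
  }

  int main(void)
  {
          /* Stand-in for the last 8-byte word of a mapped page */
          _Alignas(8) unsigned char tail[8] =
                  { 'a', 'b', 'c', 'd', 'e', 'X', 'Y', 'Z' };

          /*
           * An 8-byte load from &tail[5] would run 5 bytes past the
           * end of the "page"; the fixup instead yields the 3 valid
           * bytes ("XYZ") zero-extended: 0x5a5958 on little-endian.
           */
          printf("%#llx\n",
                 (unsigned long long)zeropad_fixup((uintptr_t)&tail[5]));
          return 0;
  }

In the kernel, the handler reads the address register and writes the
data register via the pt_regs accessors, in exception context.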

Other than backtracing, there should be no functional change as a result
of this patch.
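
For reference, the two register numbers are packed into the new extable
entries' data field as a pair of 5-bit fields (bits 4:0 for the data
register, bits 9:5 for the address register), mirroring the GENMASK()
definitions and FIELD_GET() calls in the diff below. A minimal sketch
of that round-trip, with hypothetical helper and constant names:

  #include <assert.h>
  #include <stdint.h>

  #define REG_DATA_SHIFT  0       /* EX_DATA_REG_DATA: bits 4:0 */
  #define REG_ADDR_SHIFT  5       /* EX_DATA_REG_ADDR: bits 9:5 */
  #define REG_MASK        0x1fU   /* 5 bits: register numbers 0-31 */

  static uint32_t encode_regs(uint32_t reg_data, uint32_t reg_addr)
  {
          return (reg_data << REG_DATA_SHIFT) |
                 (reg_addr << REG_ADDR_SHIFT);
  }

  int main(void)
  {
          /* e.g. destination value in x3, faulting address in x11 */
          uint32_t data = encode_regs(3, 11);

          /* what FIELD_GET(EX_DATA_REG_{DATA,ADDR}, ex->data) recovers */
          assert(((data >> REG_DATA_SHIFT) & REG_MASK) == 3);
          assert(((data >> REG_ADDR_SHIFT) & REG_MASK) == 11);
          return 0;
  }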

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/asm-extable.h    | 15 +++++++++++++++
 arch/arm64/include/asm/word-at-a-time.h | 21 ++++-----------------
 arch/arm64/mm/extable.c                 | 29 +++++++++++++++++++++++++++++
 3 files changed, 48 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 11209da19c62..c39f2437e08e 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -6,6 +6,7 @@
 #define EX_TYPE_FIXUP			1
 #define EX_TYPE_BPF			2
 #define EX_TYPE_UACCESS_ERR_ZERO	3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD	4
 
 #ifdef __ASSEMBLY__
 
@@ -75,6 +76,20 @@
 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
 	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
 
+#define EX_DATA_REG_DATA_SHIFT	0
+#define EX_DATA_REG_DATA	GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT	5
+#define EX_DATA_REG_ADDR	GENMASK(9, 5)
+
+#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)		\
+	__DEFINE_ASM_GPR_NUMS							\
+	__ASM_EXTABLE_RAW(#insn, #fixup,					\
+			  __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD),		\
+			  "("							\
+			    EX_DATA_REG(DATA, data) " | "			\
+			    EX_DATA_REG(ADDR, addr)				\
+			  ")")
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_ASM_EXTABLE_H */
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 2dcb104c645b..1c8e4f2490bf 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -53,29 +53,16 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-	unsigned long ret, tmp;
+	unsigned long ret;
 
 	__uaccess_enable_tco_async();
 
 	/* Load word from unaligned pointer addr */
 	asm(
-	"1:	ldr	%0, %3\n"
+	"1:	ldr	%0, %2\n"
 	"2:\n"
-	"	.pushsection .fixup,\"ax\"\n"
-	"	.align 2\n"
-	"3:	bic	%1, %2, #0x7\n"
-	"	ldr	%0, [%1]\n"
-	"	and	%1, %2, #0x7\n"
-	"	lsl	%1, %1, #0x3\n"
-#ifndef __AARCH64EB__
-	"	lsr	%0, %0, %1\n"
-#else
-	"	lsl	%0, %0, %1\n"
-#endif
-	"	b	2b\n"
-	"	.popsection\n"
-	_ASM_EXTABLE(1b, 3b)
-	: "=&r" (ret), "=&r" (tmp)
+	_ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
+	: "=&r" (ret)
 	: "r" (addr), "Q" (*(unsigned long *)addr));
 
 	__uaccess_disable_tco_async();
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index bbbc95313f2e..c3d53811a15e 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -39,6 +39,33 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
 	return true;
 }
 
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+				  struct pt_regs *regs)
+{
+	int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+	unsigned long data, addr, offset;
+
+	addr = pt_regs_read_reg(regs, reg_addr);
+
+	offset = addr & 0x7UL;
+	addr &= ~0x7UL;
+
+	data = *(unsigned long*)addr;
+
+#ifndef __AARCH64EB__
+	data >>= 8 * offset;
+#else
+	data <<= 8 * offset;
+#endif
+
+	pt_regs_write_reg(regs, reg_data, data);
+
+	regs->pc = get_ex_fixup(ex);
+	return true;
+}
+
 bool fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *ex;
@@ -54,6 +81,8 @@ bool fixup_exception(struct pt_regs *regs)
 		return ex_handler_bpf(ex, regs);
 	case EX_TYPE_UACCESS_ERR_ZERO:
 		return ex_handler_uaccess_err_zero(ex, regs);
+	case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+		return ex_handler_load_unaligned_zeropad(ex, regs);
 	}
 
 	BUG();
-- 
2.11.0