[openwrt/openwrt] kernel: backport patch to allow bpf fallback to interpreter

LEDE Commits lede-commits at lists.infradead.org
Tue Sep 9 21:35:05 PDT 2025


nbd pushed a commit to openwrt/openwrt.git, branch main:
https://git.openwrt.org/20d761cf195e8cbe4c0c423e8a8bdc353305274b

commit 20d761cf195e8cbe4c0c423e8a8bdc353305274b
Author: Felix Fietkau <nbd at nbd.name>
AuthorDate: Wed Sep 10 06:29:29 2025 +0200

    kernel: backport patch to allow bpf fallback to interpreter
    
    Deal with JIT failure more gracefully
    
    Fixes: https://github.com/openwrt/openwrt/issues/19405
    Signed-off-by: Felix Fietkau <nbd at nbd.name>
---
 ...all-back-to-interpreter-for-programs-with.patch | 97 ++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/target/linux/generic/backport-6.12/630-v6.17-bpf-Allow-fall-back-to-interpreter-for-programs-with.patch b/target/linux/generic/backport-6.12/630-v6.17-bpf-Allow-fall-back-to-interpreter-for-programs-with.patch
new file mode 100644
index 0000000000..9943f84a90
--- /dev/null
+++ b/target/linux/generic/backport-6.12/630-v6.17-bpf-Allow-fall-back-to-interpreter-for-programs-with.patch
@@ -0,0 +1,97 @@
+From: KaFai Wan <kafai.wan at linux.dev>
+Date: Tue, 9 Sep 2025 22:46:14 +0800
+Subject: [PATCH] bpf: Allow fall back to interpreter for programs with stack
+ size <= 512
+
+OpenWRT users reported a regression on ARMv6 devices after updating to latest
+HEAD, where the tcpdump filter:
+
+tcpdump "not ether host 3c37121a2b3c and not ether host 184ecbca2a3a \
+and not ether host 14130b4d3f47 and not ether host f0f61cf440b7 \
+and not ether host a84b4dedf471 and not ether host d022be17e1d7 \
+and not ether host 5c497967208b and not ether host 706655784d5b"
+
+fails with warning: "Kernel filter failed: No error information"
+when using config:
+ # CONFIG_BPF_JIT_ALWAYS_ON is not set
+ CONFIG_BPF_JIT_DEFAULT_ON=y
+
+The issue arises because commits:
+1. "bpf: Fix array bounds error with may_goto" changed default runtime to
+   __bpf_prog_ret0_warn when jit_requested = 1
+2. "bpf: Avoid __bpf_prog_ret0_warn when jit fails" returns error when
+   jit_requested = 1 but jit fails
+
+This change restores interpreter fallback capability for BPF programs with
+stack size <= 512 bytes when jit fails.
+
+Reported-by: Felix Fietkau <nbd at nbd.name>
+Closes: https://lore.kernel.org/bpf/2e267b4b-0540-45d8-9310-e127bf95fc63@nbd.name/
+Fixes: 6ebc5030e0c5 ("bpf: Fix array bounds error with may_goto")
+Signed-off-by: KaFai Wan <kafai.wan at linux.dev>
+Acked-by: Eduard Zingerman <eddyz87 at gmail.com>
+Link: https://lore.kernel.org/r/20250909144614.2991253-1-kafai.wan@linux.dev
+Signed-off-by: Alexei Starovoitov <ast at kernel.org>
+---
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2299,8 +2299,7 @@ static unsigned int __bpf_prog_ret0_warn
+ 					 const struct bpf_insn *insn)
+ {
+ 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+-	 * is not working properly, or interpreter is being used when
+-	 * prog->jit_requested is not 0, so warn about it!
++	 * is not working properly, so warn about it!
+ 	 */
+ 	WARN_ON_ONCE(1);
+ 	return 0;
+@@ -2385,8 +2384,9 @@ out:
+ 	return ret;
+ }
+ 
+-static void bpf_prog_select_func(struct bpf_prog *fp)
++static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
+ {
++	bool select_interpreter = false;
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+ 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
+@@ -2395,15 +2395,16 @@ static void bpf_prog_select_func(struct
+ 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
+ 	 * check needed.
+ 	 */
+-	if (!fp->jit_requested &&
+-	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
++	if (idx < ARRAY_SIZE(interpreters)) {
+ 		fp->bpf_func = interpreters[idx];
++		select_interpreter = true;
+ 	} else {
+ 		fp->bpf_func = __bpf_prog_ret0_warn;
+ 	}
+ #else
+ 	fp->bpf_func = __bpf_prog_ret0_warn;
+ #endif
++	return select_interpreter;
+ }
+ 
+ /**
+@@ -2422,7 +2423,7 @@ struct bpf_prog *bpf_prog_select_runtime
+ 	/* In case of BPF to BPF calls, verifier did all the prep
+ 	 * work with regards to JITing, etc.
+ 	 */
+-	bool jit_needed = fp->jit_requested;
++	bool jit_needed = false;
+ 
+ 	if (fp->bpf_func)
+ 		goto finalize;
+@@ -2431,7 +2432,8 @@ struct bpf_prog *bpf_prog_select_runtime
+ 	    bpf_prog_has_kfunc_call(fp))
+ 		jit_needed = true;
+ 
+-	bpf_prog_select_func(fp);
++	if (!bpf_prog_select_interpreter(fp))
++		jit_needed = true;
+ 
+ 	/* eBPF JITs can rewrite the program in case constant
+ 	 * blinding is active. However, in case of error during




More information about the lede-commits mailing list