[PATCH bpf-next v4 2/2] bpf, arm64: inline bpf_get_smp_processor_id() helper
Puranjay Mohan
puranjay@kernel.org
Mon Apr 29 06:16:47 PDT 2024
As the ARM64 JIT now implements the BPF_MOV64_PERCPU_REG instruction,
inline bpf_get_smp_processor_id().

ARM64 uses the per-cpu variable cpu_number to store the CPU id.
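At the C level, the rewrite is equivalent to reading that per-cpu
variable directly. A minimal sketch (not code from this patch;
this_cpu_ptr() is the kernel's generic per-cpu accessor, and cpu_number
is the arm64 per-cpu variable named above):

  /* Sketch: what the inlined helper call effectively computes. */
  static inline int inlined_cpu_id(void)
  {
          return *this_cpu_ptr(&cpu_number);
  }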
Here is how the BPF and ARM64 JITed assembly changes after this commit:
BPF
=====

BEFORE:

  int cpu = bpf_get_smp_processor_id();
  (85) call bpf_get_smp_processor_id#229032

AFTER:

  int cpu = bpf_get_smp_processor_id();
  (18) r0 = 0xffff800082072008
  (bf) r0 = &(void __percpu *)(r0)
  (61) r0 = *(u32 *)(r0 +0)
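In macro form, the three instructions in the AFTER dump are built as
shown below (a sketch of the emitted sequence, annotated; the exact
construction is in the verifier diff further down):

  BPF_LD_IMM64(BPF_REG_0, (u64)&cpu_number),   /* (18): r0 = &cpu_number; occupies two insn slots */
  BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0),  /* (bf): r0 += this CPU's per-cpu offset */
  BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), /* (61): r0 = *(u32 *)(r0 + 0), the CPU id */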
ARM64 JIT
===========

BEFORE:

  int cpu = bpf_get_smp_processor_id();
  mov  x10, #0xfffffffffffff4d0
  movk x10, #0x802b, lsl #16
  movk x10, #0x8000, lsl #32
  blr  x10
  add  x7, x0, #0x0

AFTER:

  int cpu = bpf_get_smp_processor_id();
  mov  x7, #0xffff8000ffffffff
  movk x7, #0x8207, lsl #16
  movk x7, #0x2008
  mrs  x10, tpidr_el1
  add  x7, x7, x10
  ldr  w7, [x7]
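The AFTER sequence materializes the 64-bit address of cpu_number with
mov/movk, reads this CPU's per-cpu offset from tpidr_el1 with mrs, and
finishes the load with add/ldr. In C terms (a sketch; read_sysreg() is
the arm64 system-register accessor, and a VHE kernel running at EL2
keeps the offset in tpidr_el2 instead):

  u64 base = (u64)&cpu_number;        /* mov + movk + movk  */
  u64 off  = read_sysreg(tpidr_el1);  /* mrs x10, tpidr_el1 */
  int cpu  = *(u32 *)(base + off);    /* add + ldr          */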
Performance improvement using benchmark [1]:

BEFORE:

  glob-arr-inc :  23.817 ± 0.019M/s
  arr-inc      :  23.253 ± 0.019M/s
  hash-inc     :  12.258 ± 0.010M/s

AFTER:

  glob-arr-inc :  24.631 ± 0.027M/s  [+ 3.41%]
  arr-inc      :  23.742 ± 0.023M/s  [+ 2.10%]
  hash-inc     :  12.625 ± 0.004M/s  [+ 3.00%]
[1] https://github.com/anakryiko/linux/commit/8dec900975ef
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
kernel/bpf/verifier.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4e474ef44e9c..d0725b1c7bec 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20273,19 +20273,33 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         goto next_insn;
                 }
 
-#ifdef CONFIG_X86_64
                 /* Implement bpf_get_smp_processor_id() inline. */
                 if (insn->imm == BPF_FUNC_get_smp_processor_id &&
                     prog->jit_requested && bpf_jit_supports_percpu_insn()) {
                         /* BPF_FUNC_get_smp_processor_id inlining is an
-                         * optimization, so if pcpu_hot.cpu_number is ever
+                         * optimization, so if cpu_number_addr is ever
                          * changed in some incompatible and hard to support
                          * way, it's fine to back out this inlining logic
                          */
-                        insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
-                        insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
-                        insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
-                        cnt = 3;
+                        u64 cpu_number_addr;
+                        struct bpf_insn ld_insn[2] = {
+                                BPF_LD_IMM64(BPF_REG_0, 0)
+                        };
+
+#if defined(CONFIG_X86_64)
+                        cpu_number_addr = (u64)&pcpu_hot.cpu_number;
+#elif defined(CONFIG_ARM64)
+                        cpu_number_addr = (u64)&cpu_number;
+#else
+                        goto next_insn;
+#endif
+                        ld_insn[0].imm = (u32)cpu_number_addr;
+                        ld_insn[1].imm = (u32)(cpu_number_addr >> 32);
+                        insn_buf[0] = ld_insn[0];
+                        insn_buf[1] = ld_insn[1];
+                        insn_buf[2] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+                        insn_buf[3] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
+                        cnt = 4;
 
                         new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
@@ -20296,7 +20310,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         insn = new_prog->insnsi + i + delta;
                         goto next_insn;
                 }
-#endif
                 /* Implement bpf_get_func_arg inline. */
                 if (prog_type == BPF_PROG_TYPE_TRACING &&
                     insn->imm == BPF_FUNC_get_func_arg) {
--
2.40.1